Refactor the remote write version flag to make it easier to pick a specific format

instead of having multiple feature flags; also add new remote write formats for testing.

Signed-off-by: Callum Styan <callumstyan@gmail.com>
This commit is contained in:
Callum Styan 2023-11-13 11:13:49 -08:00
parent 4bdb865ae3
commit ed0c3b25c7
28 changed files with 5157 additions and 19419 deletions

View file

@ -154,7 +154,8 @@ type flagConfig struct {
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
enableSenderRemoteWrite11 bool
// todo: how to use the enable feature flag properly + use the remote format enum type
rwFormat int
prometheusURL string
corsRegexString string
@ -211,11 +212,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
continue
case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "rw-1-1-sender":
c.enableSenderRemoteWrite11 = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
case "rw-1-1-receiver":
c.web.EnableReceiverRemoteWrite11 = true
default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
}
@ -429,6 +425,9 @@ func main() {
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("remote-write-format", "remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format)").
Default("0").IntVar(&cfg.rwFormat)
promlogflag.AddFlags(a, &cfg.promlogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@ -601,7 +600,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11)
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, remote.RemoteWriteFormat(cfg.rwFormat))
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
@ -725,6 +724,7 @@ func main() {
cfg.web.Flags[f.Name] = f.Value.String()
}
cfg.web.RemoteWriteFormat = remote.RemoteWriteFormat(cfg.rwFormat)
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)

View file

@ -53,6 +53,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--remote-write-format</code> | remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format) | `0` |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -57,38 +57,6 @@ func main() {
}
})
http.HandleFunc("/receiveReduced", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeReducedWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, e := range ts.Exemplars {
m := make(model.Metric, len(e.Labels))
for _, l := range e.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
}
for _, hp := range ts.Histograms {
h := remote.HistogramProtoToHistogram(hp)
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
})
http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeMinimizedWriteRequest(r.Body)
if err != nil {
@ -97,8 +65,25 @@ func main() {
}
for _, ts := range req.Timeseries {
ls := remote.Uint32RefToLabels(req.Symbols, ts.LabelSymbols)
fmt.Println(ls)
m := make(model.Metric, len(ts.LabelSymbols)/2)
labelIdx := 0
for labelIdx < len(ts.LabelSymbols) {
// todo, check for overflow?
offset := ts.LabelSymbols[labelIdx]
labelIdx++
length := ts.LabelSymbols[labelIdx]
labelIdx++
name := req.Symbols[offset : offset+length]
// todo, check for overflow?
offset = ts.LabelSymbols[labelIdx]
labelIdx++
length = ts.LabelSymbols[labelIdx]
labelIdx++
value := req.Symbols[offset : offset+length]
m[model.LabelName(name)] = model.LabelValue(value)
}
fmt.Println(m)
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)

View file

@ -60,7 +60,7 @@ func (x ReadRequest_ResponseType) String() string {
}
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2, 0}
return fileDescriptor_eefc82927d57d89b, []int{5, 0}
}
type WriteRequest struct {
@ -118,6 +118,63 @@ func (m *WriteRequest) GetMetadata() []MetricMetadata {
return nil
}
// MinimizedWriteRequestPacking is an experimental remote-write request message
// whose time series reference label data via the shared Symbols string rather
// than storing label strings inline.
// NOTE(review): protoc-gen-gogo generated code — regenerate from remote.proto
// instead of editing by hand.
type MinimizedWriteRequestPacking struct {
    Timeseries []MinimizedTimeSeriesPacking `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
    // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
    // to know the offset:length range of the actual symbol to read from this string.
    Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *MinimizedWriteRequestPacking) Reset() { *m = MinimizedWriteRequestPacking{} }

// String renders the message in the compact proto text format.
func (m *MinimizedWriteRequestPacking) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestPacking) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor and this message's index path within it.
func (*MinimizedWriteRequestPacking) Descriptor() ([]byte, []int) {
    return fileDescriptor_eefc82927d57d89b, []int{1}
}

// XXX_Unmarshal decodes wire-format bytes via the hand-rolled Unmarshal.
func (m *MinimizedWriteRequestPacking) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}

// XXX_Marshal serializes the message: the reflection-based path when a
// deterministic encoding is requested, the fast generated path otherwise.
func (m *MinimizedWriteRequestPacking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_MinimizedWriteRequestPacking.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}

// XXX_Merge merges src into m using the shared message-info table.
func (m *MinimizedWriteRequestPacking) XXX_Merge(src proto.Message) {
    xxx_messageInfo_MinimizedWriteRequestPacking.Merge(m, src)
}

// XXX_Size reports the encoded size in bytes.
func (m *MinimizedWriteRequestPacking) XXX_Size() int {
    return m.Size()
}

// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *MinimizedWriteRequestPacking) XXX_DiscardUnknown() {
    xxx_messageInfo_MinimizedWriteRequestPacking.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestPacking proto.InternalMessageInfo

// GetTimeseries returns the Timeseries slice; nil-receiver safe.
func (m *MinimizedWriteRequestPacking) GetTimeseries() []MinimizedTimeSeriesPacking {
    if m != nil {
        return m.Timeseries
    }
    return nil
}

// GetSymbols returns the concatenated symbols table; nil-receiver safe.
func (m *MinimizedWriteRequestPacking) GetSymbols() string {
    if m != nil {
        return m.Symbols
    }
    return ""
}
type MinimizedWriteRequest struct {
Timeseries []MinimizedTimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
@ -132,7 +189,7 @@ func (m *MinimizedWriteRequest) Reset() { *m = MinimizedWriteRequest{} }
func (m *MinimizedWriteRequest) String() string { return proto.CompactTextString(m) }
func (*MinimizedWriteRequest) ProtoMessage() {}
func (*MinimizedWriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1}
return fileDescriptor_eefc82927d57d89b, []int{2}
}
func (m *MinimizedWriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -175,6 +232,120 @@ func (m *MinimizedWriteRequest) GetSymbols() string {
return ""
}
// MinimizedWriteRequestFixed32 is an experimental remote-write request variant
// whose time series use the Fixed32 label-reference encoding against the shared
// Symbols string.
// NOTE(review): protoc-gen-gogo generated code — regenerate from remote.proto
// instead of editing by hand.
type MinimizedWriteRequestFixed32 struct {
    Timeseries []MinimizedTimeSeriesFixed32 `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
    // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
    // to know the offset:length range of the actual symbol to read from this string.
    Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *MinimizedWriteRequestFixed32) Reset() { *m = MinimizedWriteRequestFixed32{} }

// String renders the message in the compact proto text format.
func (m *MinimizedWriteRequestFixed32) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestFixed32) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor and this message's index path within it.
func (*MinimizedWriteRequestFixed32) Descriptor() ([]byte, []int) {
    return fileDescriptor_eefc82927d57d89b, []int{3}
}

// XXX_Unmarshal decodes wire-format bytes via the hand-rolled Unmarshal.
func (m *MinimizedWriteRequestFixed32) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}

// XXX_Marshal serializes the message: reflection-based path when deterministic
// output is requested, fast generated path otherwise.
func (m *MinimizedWriteRequestFixed32) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_MinimizedWriteRequestFixed32.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}

// XXX_Merge merges src into m using the shared message-info table.
func (m *MinimizedWriteRequestFixed32) XXX_Merge(src proto.Message) {
    xxx_messageInfo_MinimizedWriteRequestFixed32.Merge(m, src)
}

// XXX_Size reports the encoded size in bytes.
func (m *MinimizedWriteRequestFixed32) XXX_Size() int {
    return m.Size()
}

// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *MinimizedWriteRequestFixed32) XXX_DiscardUnknown() {
    xxx_messageInfo_MinimizedWriteRequestFixed32.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestFixed32 proto.InternalMessageInfo

// GetTimeseries returns the Timeseries slice; nil-receiver safe.
func (m *MinimizedWriteRequestFixed32) GetTimeseries() []MinimizedTimeSeriesFixed32 {
    if m != nil {
        return m.Timeseries
    }
    return nil
}

// GetSymbols returns the concatenated symbols table; nil-receiver safe.
func (m *MinimizedWriteRequestFixed32) GetSymbols() string {
    if m != nil {
        return m.Symbols
    }
    return ""
}
// MinimizedWriteRequestFixed64 is an experimental remote-write request variant
// whose time series use the Fixed64 label-reference encoding against the shared
// Symbols string.
// NOTE(review): protoc-gen-gogo generated code — regenerate from remote.proto
// instead of editing by hand.
type MinimizedWriteRequestFixed64 struct {
    Timeseries []MinimizedTimeSeriesFixed64 `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
    // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
    // to know the offset:length range of the actual symbol to read from this string.
    Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *MinimizedWriteRequestFixed64) Reset() { *m = MinimizedWriteRequestFixed64{} }

// String renders the message in the compact proto text format.
func (m *MinimizedWriteRequestFixed64) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestFixed64) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor and this message's index path within it.
func (*MinimizedWriteRequestFixed64) Descriptor() ([]byte, []int) {
    return fileDescriptor_eefc82927d57d89b, []int{4}
}

// XXX_Unmarshal decodes wire-format bytes via the hand-rolled Unmarshal.
func (m *MinimizedWriteRequestFixed64) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}

// XXX_Marshal serializes the message: reflection-based path when deterministic
// output is requested, fast generated path otherwise.
func (m *MinimizedWriteRequestFixed64) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_MinimizedWriteRequestFixed64.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}

// XXX_Merge merges src into m using the shared message-info table.
func (m *MinimizedWriteRequestFixed64) XXX_Merge(src proto.Message) {
    xxx_messageInfo_MinimizedWriteRequestFixed64.Merge(m, src)
}

// XXX_Size reports the encoded size in bytes.
func (m *MinimizedWriteRequestFixed64) XXX_Size() int {
    return m.Size()
}

// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *MinimizedWriteRequestFixed64) XXX_DiscardUnknown() {
    xxx_messageInfo_MinimizedWriteRequestFixed64.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestFixed64 proto.InternalMessageInfo

// GetTimeseries returns the Timeseries slice; nil-receiver safe.
func (m *MinimizedWriteRequestFixed64) GetTimeseries() []MinimizedTimeSeriesFixed64 {
    if m != nil {
        return m.Timeseries
    }
    return nil
}

// GetSymbols returns the concatenated symbols table; nil-receiver safe.
func (m *MinimizedWriteRequestFixed64) GetSymbols() string {
    if m != nil {
        return m.Symbols
    }
    return ""
}
// ReadRequest represents a remote read request.
type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
@ -193,7 +364,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2}
return fileDescriptor_eefc82927d57d89b, []int{5}
}
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -249,7 +420,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3}
return fileDescriptor_eefc82927d57d89b, []int{6}
}
func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -299,7 +470,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{4}
return fileDescriptor_eefc82927d57d89b, []int{7}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -368,7 +539,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5}
return fileDescriptor_eefc82927d57d89b, []int{8}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -421,7 +592,7 @@ func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{6}
return fileDescriptor_eefc82927d57d89b, []int{9}
}
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -467,7 +638,10 @@ func (m *ChunkedReadResponse) GetQueryIndex() int64 {
func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*MinimizedWriteRequestPacking)(nil), "prometheus.MinimizedWriteRequestPacking")
proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest")
proto.RegisterType((*MinimizedWriteRequestFixed32)(nil), "prometheus.MinimizedWriteRequestFixed32")
proto.RegisterType((*MinimizedWriteRequestFixed64)(nil), "prometheus.MinimizedWriteRequestFixed64")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query")
@ -478,41 +652,45 @@ func init() {
// Register the raw (gzipped) FileDescriptorProto for remote.proto with the proto registry.
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{
// 543 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0xae, 0xeb, 0xb4, 0x09, 0xe3, 0x10, 0x99, 0x6d, 0x43, 0x4c, 0x0e, 0x49, 0x64, 0x71, 0x88,
0x54, 0x14, 0x44, 0xa8, 0x38, 0xf5, 0x40, 0x5a, 0x22, 0x95, 0x52, 0xf3, 0xb3, 0x09, 0x02, 0x21,
0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0x53, 0xef, 0x5a, 0x6a, 0x38, 0xf3, 0x00, 0x3c, 0x13,
0xa7, 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x0d, 0x20, 0xc4, 0xcd, 0xfb,
0xfd, 0xcd, 0xec, 0xec, 0x18, 0xea, 0x09, 0x06, 0x11, 0xc7, 0x41, 0x9c, 0x44, 0x3c, 0x22, 0x10,
0x27, 0x51, 0x80, 0x7c, 0x8e, 0x29, 0x6b, 0x6b, 0x7c, 0x19, 0x23, 0xcb, 0x89, 0xf6, 0xfe, 0x45,
0x74, 0x11, 0x89, 0xcf, 0xfb, 0xd9, 0x57, 0x8e, 0x9a, 0x9f, 0x15, 0xa8, 0xbf, 0x49, 0x7c, 0x8e,
0x14, 0x2f, 0x53, 0x64, 0x9c, 0x1c, 0x01, 0x70, 0x3f, 0x40, 0x86, 0x89, 0x8f, 0xcc, 0x50, 0x7a,
0x6a, 0x5f, 0x1b, 0xde, 0x1e, 0xfc, 0x0a, 0x1d, 0x4c, 0xfd, 0x00, 0x27, 0x82, 0x3d, 0xae, 0x5c,
0x7f, 0xef, 0x6e, 0x51, 0x49, 0x4f, 0x8e, 0xa0, 0x16, 0x20, 0x77, 0x3c, 0x87, 0x3b, 0x86, 0x2a,
0xbc, 0x6d, 0xd9, 0x6b, 0x21, 0x4f, 0x7c, 0xd7, 0x2a, 0x14, 0x85, 0x7f, 0xed, 0x38, 0xab, 0xd4,
0xb6, 0x75, 0xd5, 0xfc, 0xa4, 0x40, 0xd3, 0xf2, 0x43, 0x3f, 0xf0, 0x3f, 0xa2, 0xb7, 0xd1, 0xdb,
0xf8, 0x2f, 0xbd, 0x75, 0x37, 0xf2, 0x4b, 0xdb, 0x3f, 0x9b, 0x34, 0xa0, 0xca, 0x96, 0xc1, 0x2c,
0x5a, 0x30, 0xa3, 0xd2, 0x53, 0xfa, 0x37, 0x68, 0x79, 0xcc, 0x1b, 0x38, 0xab, 0xd4, 0x54, 0xbd,
0x62, 0x7e, 0x55, 0x40, 0xa3, 0xe8, 0x78, 0x65, 0xf1, 0x03, 0xa8, 0x5e, 0xa6, 0x72, 0xe5, 0x5b,
0x72, 0xe5, 0x57, 0x29, 0x26, 0x4b, 0x5a, 0x2a, 0xc8, 0x7b, 0x68, 0x39, 0xae, 0x8b, 0x31, 0x47,
0xcf, 0x4e, 0x90, 0xc5, 0x51, 0xc8, 0xd0, 0x16, 0xaf, 0x61, 0x6c, 0xf7, 0xd4, 0x7e, 0x63, 0x78,
0x57, 0x36, 0x4b, 0x65, 0x06, 0xb4, 0x50, 0x4f, 0x97, 0x31, 0xd2, 0x66, 0x19, 0x22, 0xa3, 0xcc,
0x3c, 0x84, 0xba, 0x0c, 0x10, 0x0d, 0xaa, 0x93, 0x91, 0xf5, 0xf2, 0x7c, 0x3c, 0xd1, 0xb7, 0x48,
0x0b, 0xf6, 0x26, 0x53, 0x3a, 0x1e, 0x59, 0xe3, 0x27, 0xf6, 0xdb, 0x17, 0xd4, 0x3e, 0x39, 0x7d,
0xfd, 0xfc, 0xd9, 0x44, 0x57, 0xcc, 0x51, 0xe6, 0x72, 0xd6, 0x51, 0xe4, 0x01, 0x54, 0x13, 0x64,
0xe9, 0x82, 0x97, 0x17, 0x6a, 0xfd, 0x79, 0x21, 0xc1, 0xd3, 0x52, 0x67, 0x7e, 0x51, 0x60, 0x47,
0x10, 0xe4, 0x1e, 0x10, 0xc6, 0x9d, 0x84, 0xdb, 0x62, 0xae, 0xdc, 0x09, 0x62, 0x3b, 0xc8, 0x72,
0x94, 0xbe, 0x4a, 0x75, 0xc1, 0x4c, 0x4b, 0xc2, 0x62, 0xa4, 0x0f, 0x3a, 0x86, 0xde, 0xa6, 0x76,
0x5b, 0x68, 0x1b, 0x18, 0x7a, 0xb2, 0xf2, 0x10, 0x6a, 0x81, 0xc3, 0xdd, 0x39, 0x26, 0xac, 0x58,
0x20, 0x43, 0xee, 0xea, 0xdc, 0x99, 0xe1, 0xc2, 0xca, 0x05, 0x74, 0xad, 0x24, 0x07, 0xb0, 0x33,
0xf7, 0x43, 0x9e, 0xbf, 0xa7, 0x36, 0x6c, 0xfe, 0x3e, 0xdc, 0xd3, 0x8c, 0xa4, 0xb9, 0xc6, 0x1c,
0x83, 0x26, 0x5d, 0x8e, 0x3c, 0xfa, 0xff, 0x85, 0x97, 0xb7, 0xc8, 0xbc, 0x82, 0xbd, 0x93, 0x79,
0x1a, 0x7e, 0xc8, 0x1e, 0x47, 0x9a, 0xea, 0x63, 0x68, 0xb8, 0x39, 0x6c, 0x6f, 0x44, 0xde, 0x91,
0x23, 0x0b, 0x63, 0x91, 0x7a, 0xd3, 0x95, 0x8f, 0xa4, 0x0b, 0x5a, 0xb6, 0x46, 0x4b, 0xdb, 0x0f,
0x3d, 0xbc, 0x2a, 0xe6, 0x04, 0x02, 0x7a, 0x9a, 0x21, 0xc7, 0xfb, 0xd7, 0xab, 0x8e, 0xf2, 0x6d,
0xd5, 0x51, 0x7e, 0xac, 0x3a, 0xca, 0xbb, 0xdd, 0x2c, 0x37, 0x9e, 0xcd, 0x76, 0xc5, 0x0f, 0xfd,
0xf0, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x3e, 0xdc, 0x81, 0x0f, 0x04, 0x00, 0x00,
// 601 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0xdf, 0x4e, 0x13, 0x41,
0x14, 0xc6, 0x19, 0x5a, 0x68, 0x3d, 0x8b, 0x64, 0x1d, 0x40, 0x56, 0x62, 0x80, 0x6c, 0x8c, 0x69,
0x82, 0xa9, 0xb1, 0x34, 0x5c, 0x71, 0x21, 0x60, 0x0d, 0x22, 0xab, 0x38, 0xad, 0xd1, 0x18, 0x93,
0xcd, 0x76, 0xf7, 0x84, 0x4e, 0x60, 0xff, 0xb0, 0x33, 0x9b, 0x50, 0xaf, 0x7d, 0x00, 0xe3, 0x23,
0x79, 0xc5, 0x95, 0xf1, 0x09, 0x8c, 0xe1, 0x49, 0xcc, 0xfe, 0x83, 0xa9, 0xd6, 0x68, 0x9a, 0x78,
0xb7, 0x7b, 0xce, 0xf7, 0x7d, 0xfb, 0x9b, 0x33, 0x3b, 0x03, 0x73, 0x31, 0xfa, 0xa1, 0xc4, 0x66,
0x14, 0x87, 0x32, 0xa4, 0x10, 0xc5, 0xa1, 0x8f, 0x72, 0x80, 0x89, 0x58, 0xd1, 0xe4, 0x30, 0x42,
0x91, 0x37, 0x56, 0x16, 0x8f, 0xc3, 0xe3, 0x30, 0x7b, 0x7c, 0x98, 0x3e, 0xe5, 0x55, 0xf3, 0x13,
0x81, 0xb9, 0x37, 0x31, 0x97, 0xc8, 0xf0, 0x2c, 0x41, 0x21, 0xe9, 0x36, 0x80, 0xe4, 0x3e, 0x0a,
0x8c, 0x39, 0x0a, 0x83, 0xac, 0x57, 0x1a, 0x5a, 0xeb, 0x76, 0xf3, 0x3a, 0xb4, 0xd9, 0xe3, 0x3e,
0x76, 0xb3, 0xee, 0x6e, 0xf5, 0xe2, 0xfb, 0xda, 0x14, 0x53, 0xf4, 0x74, 0x1b, 0xea, 0x3e, 0x4a,
0xc7, 0x73, 0xa4, 0x63, 0x54, 0x32, 0xef, 0x8a, 0xea, 0xb5, 0x50, 0xc6, 0xdc, 0xb5, 0x0a, 0x45,
0xe1, 0xbf, 0x72, 0x1c, 0x54, 0xeb, 0xd3, 0x7a, 0xc5, 0xfc, 0x4c, 0xe0, 0xae, 0xc5, 0x03, 0xee,
0xf3, 0x0f, 0xe8, 0xa9, 0x6c, 0x47, 0x8e, 0x7b, 0xc2, 0x83, 0x63, 0x7a, 0x38, 0x06, 0xf1, 0xfe,
0xc8, 0x67, 0x4a, 0xf7, 0x35, 0x6b, 0xe1, 0x1d, 0x83, 0x6c, 0x40, 0x4d, 0x0c, 0xfd, 0x7e, 0x78,
0x2a, 0x8c, 0xea, 0x3a, 0x69, 0xdc, 0x60, 0xe5, 0x6b, 0x8e, 0x73, 0x50, 0xad, 0x57, 0xf4, 0xaa,
0xf9, 0x91, 0xc0, 0xd2, 0x58, 0x28, 0xda, 0x19, 0x43, 0xb3, 0xf6, 0x17, 0x9a, 0x89, 0x31, 0xfe,
0x38, 0x9b, 0xa7, 0xfc, 0x1c, 0xbd, 0xcd, 0xd6, 0x44, 0xb3, 0x29, 0xbc, 0xff, 0x09, 0x6a, 0xab,
0x3d, 0x39, 0xd4, 0x56, 0x7b, 0x62, 0xa8, 0xaf, 0x04, 0x34, 0x86, 0x8e, 0x57, 0x6e, 0xd3, 0x06,
0xd4, 0xce, 0x12, 0x15, 0xe0, 0x96, 0x0a, 0xf0, 0x2a, 0xc1, 0x78, 0xc8, 0x4a, 0x05, 0x7d, 0x0f,
0xcb, 0x8e, 0xeb, 0x62, 0x24, 0xd1, 0xb3, 0x63, 0x14, 0x51, 0x18, 0x08, 0xb4, 0xb3, 0xc3, 0x64,
0x4c, 0xaf, 0x57, 0x1a, 0xf3, 0xad, 0x7b, 0xaa, 0x59, 0xf9, 0x4c, 0x93, 0x15, 0xea, 0xde, 0x30,
0x42, 0xb6, 0x54, 0x86, 0xa8, 0x55, 0x61, 0xb6, 0x61, 0x4e, 0x2d, 0x50, 0x0d, 0x6a, 0xdd, 0x1d,
0xeb, 0xe8, 0xb0, 0xd3, 0xd5, 0xa7, 0xe8, 0x32, 0x2c, 0x74, 0x7b, 0xac, 0xb3, 0x63, 0x75, 0x9e,
0xd8, 0x6f, 0x5f, 0x32, 0x7b, 0x6f, 0xff, 0xf5, 0x8b, 0xe7, 0x5d, 0x9d, 0x98, 0x3b, 0xa9, 0xcb,
0xb9, 0x8a, 0xa2, 0x8f, 0xa0, 0x16, 0xa3, 0x48, 0x4e, 0x65, 0xb9, 0xa0, 0xe5, 0xdf, 0x17, 0x94,
0xf5, 0x59, 0xa9, 0x33, 0xbf, 0x10, 0x98, 0xc9, 0x1a, 0xf4, 0x01, 0x50, 0x21, 0x9d, 0x58, 0xda,
0xd9, 0x5c, 0xa5, 0xe3, 0x47, 0xb6, 0x9f, 0xe6, 0x90, 0x46, 0x85, 0xe9, 0x59, 0xa7, 0x57, 0x36,
0x2c, 0x41, 0x1b, 0xa0, 0x63, 0xe0, 0x8d, 0x6a, 0xa7, 0x33, 0xed, 0x3c, 0x06, 0x9e, 0xaa, 0x6c,
0x43, 0xdd, 0x77, 0xa4, 0x3b, 0xc0, 0x58, 0x14, 0xe7, 0xdf, 0x50, 0xa9, 0x0e, 0x9d, 0x3e, 0x9e,
0x5a, 0xb9, 0x80, 0x5d, 0x29, 0xe9, 0x06, 0xcc, 0x0c, 0x78, 0x20, 0xf3, 0xfd, 0xd4, 0x5a, 0x4b,
0xbf, 0x0e, 0x77, 0x3f, 0x6d, 0xb2, 0x5c, 0x63, 0x76, 0x40, 0x53, 0x16, 0x47, 0xb7, 0xfe, 0xfd,
0xbe, 0x52, 0xff, 0x22, 0xf3, 0x1c, 0x16, 0xf6, 0x06, 0x49, 0x70, 0x92, 0x6e, 0x8e, 0x32, 0xd5,
0xc7, 0x30, 0xef, 0xe6, 0x65, 0x7b, 0x24, 0xf2, 0x8e, 0x1a, 0x59, 0x18, 0x8b, 0xd4, 0x9b, 0xae,
0xfa, 0x4a, 0xd7, 0x40, 0x4b, 0x7f, 0xa3, 0xa1, 0xcd, 0x03, 0x0f, 0xcf, 0x8b, 0x39, 0x41, 0x56,
0x7a, 0x96, 0x56, 0x76, 0x17, 0x2f, 0x2e, 0x57, 0xc9, 0xb7, 0xcb, 0x55, 0xf2, 0xe3, 0x72, 0x95,
0xbc, 0x9b, 0x4d, 0x73, 0xa3, 0x7e, 0x7f, 0x36, 0xbb, 0x8f, 0x37, 0x7f, 0x06, 0x00, 0x00, 0xff,
0xff, 0x13, 0xe9, 0xfd, 0x50, 0xce, 0x05, 0x00, 0x00,
}
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@ -570,6 +748,54 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
// Marshal serializes the message into a freshly allocated, exactly-sized buffer.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestPacking) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

// MarshalTo serializes the message into the caller-supplied buffer.
func (m *MinimizedWriteRequestPacking) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message backwards from the end of dAtA
// (fields are emitted in descending field-number order) and returns the
// number of bytes written.
func (m *MinimizedWriteRequestPacking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.XXX_unrecognized != nil {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if len(m.Symbols) > 0 {
        i -= len(m.Symbols)
        copy(dAtA[i:], m.Symbols)
        i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
        i--
        dAtA[i] = 0x22 // field 4, wire type 2 (length-delimited)
    }
    if len(m.Timeseries) > 0 {
        for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintRemote(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
        }
    }
    return len(dAtA) - i, nil
}
func (m *MinimizedWriteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -618,6 +844,102 @@ func (m *MinimizedWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
// Marshal serializes the message into a freshly allocated, exactly-sized buffer.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestFixed32) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

// MarshalTo serializes the message into the caller-supplied buffer.
func (m *MinimizedWriteRequestFixed32) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message backwards from the end of dAtA
// (fields emitted in descending field-number order) and returns the byte count.
func (m *MinimizedWriteRequestFixed32) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.XXX_unrecognized != nil {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if len(m.Symbols) > 0 {
        i -= len(m.Symbols)
        copy(dAtA[i:], m.Symbols)
        i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
        i--
        dAtA[i] = 0x22 // field 4, wire type 2 (length-delimited)
    }
    if len(m.Timeseries) > 0 {
        for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintRemote(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
        }
    }
    return len(dAtA) - i, nil
}
// Marshal serializes the message into a freshly allocated, exactly-sized buffer.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestFixed64) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

// MarshalTo serializes the message into the caller-supplied buffer.
func (m *MinimizedWriteRequestFixed64) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message backwards from the end of dAtA
// (fields emitted in descending field-number order) and returns the byte count.
func (m *MinimizedWriteRequestFixed64) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.XXX_unrecognized != nil {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if len(m.Symbols) > 0 {
        i -= len(m.Symbols)
        copy(dAtA[i:], m.Symbols)
        i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
        i--
        dAtA[i] = 0x22 // field 4, wire type 2 (length-delimited)
    }
    if len(m.Timeseries) > 0 {
        for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintRemote(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
        }
    }
    return len(dAtA) - i, nil
}
func (m *ReadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -903,6 +1225,28 @@ func (m *WriteRequest) Size() (n int) {
return n
}
// Size returns the encoded wire-format size of the message in bytes;
// nil-receiver safe. Each "1 +" accounts for the one-byte field tag.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestPacking) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    if len(m.Timeseries) > 0 {
        for _, e := range m.Timeseries {
            l = e.Size()
            n += 1 + l + sovRemote(uint64(l))
        }
    }
    l = len(m.Symbols)
    if l > 0 {
        n += 1 + l + sovRemote(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
    return n
}
func (m *MinimizedWriteRequest) Size() (n int) {
if m == nil {
return 0
@ -925,6 +1269,50 @@ func (m *MinimizedWriteRequest) Size() (n int) {
return n
}
// Size returns the encoded wire-format size of the message in bytes;
// nil-receiver safe. Each "1 +" accounts for the one-byte field tag.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestFixed32) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    if len(m.Timeseries) > 0 {
        for _, e := range m.Timeseries {
            l = e.Size()
            n += 1 + l + sovRemote(uint64(l))
        }
    }
    l = len(m.Symbols)
    if l > 0 {
        n += 1 + l + sovRemote(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
    return n
}
// Size returns the encoded wire-format size of the message in bytes;
// nil-receiver safe. Each "1 +" accounts for the one-byte field tag.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestFixed64) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    if len(m.Timeseries) > 0 {
        for _, e := range m.Timeseries {
            l = e.Size()
            n += 1 + l + sovRemote(uint64(l))
        }
    }
    l = len(m.Symbols)
    if l > 0 {
        n += 1 + l + sovRemote(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
    return n
}
func (m *ReadRequest) Size() (n int) {
if m == nil {
return 0
@ -1160,6 +1548,123 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Unknown fields are preserved in XXX_unrecognized so they round-trip.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestPacking) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        // Decode the varint field key: (field number << 3) | wire type.
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowRemote
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: MinimizedWriteRequestPacking: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: MinimizedWriteRequestPacking: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            // Timeseries: length-delimited embedded message, appended per occurrence.
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowRemote
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthRemote
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthRemote
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesPacking{})
            if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 4:
            // Symbols: length-delimited string (the shared symbols table).
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowRemote
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthRemote
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthRemote
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Symbols = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        default:
            // Unknown field: skip it and retain the raw bytes.
            iNdEx = preIndex
            skippy, err := skipRemote(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthRemote
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -1277,6 +1782,240 @@ func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Unknown fields are preserved in XXX_unrecognized so they round-trip.
// NOTE(review): protoc-gen-gogo generated code — regenerate rather than edit.
func (m *MinimizedWriteRequestFixed32) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        // Decode the varint field key: (field number << 3) | wire type.
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowRemote
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: MinimizedWriteRequestFixed32: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: MinimizedWriteRequestFixed32: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            // Timeseries: length-delimited embedded message, appended per occurrence.
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowRemote
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthRemote
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthRemote
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesFixed32{})
            if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 4:
            // Symbols: length-delimited string (the shared symbols table).
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowRemote
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthRemote
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthRemote
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Symbols = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        default:
            // Unknown field: skip it and retain the raw bytes.
            iNdEx = preIndex
            skippy, err := skipRemote(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthRemote
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
            iNdEx += skippy
        }
    }
    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m.
// NOTE(review): this looks like gogo-protobuf generated code for the
// MinimizedWriteRequestFixed64 message — prefer regenerating (presumably via
// scripts/genproto.sh) over editing it by hand.
func (m *MinimizedWriteRequestFixed64) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the key varint: field number and wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowRemote
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed64: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed64: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated MinimizedTimeSeriesFixed64 timeseries
			// (length-delimited sub-message per element).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesFixed64{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: the shared symbols string (length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Symbols = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes so the message
			// round-trips unchanged.
			iNdEx = preIndex
			skippy, err := skipRemote(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthRemote
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *ReadRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0

View file

@ -27,6 +27,17 @@ message WriteRequest {
repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
}
// MinimizedWriteRequestPacking is an experimental remote-write request whose
// series carry packed references into the shared symbols table instead of
// inline label strings.
message MinimizedWriteRequestPacking {
  repeated MinimizedTimeSeriesPacking timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
message MinimizedWriteRequest {
repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
@ -40,6 +51,30 @@ message MinimizedWriteRequest {
string symbols = 4;
}
// MinimizedWriteRequestFixed32 is an experimental remote-write request whose
// series reference the shared symbols table via fixed32 values.
message MinimizedWriteRequestFixed32 {
  repeated MinimizedTimeSeriesFixed32 timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
// MinimizedWriteRequestFixed64 is an experimental remote-write request whose
// series reference the shared symbols table via single fixed64 values.
message MinimizedWriteRequestFixed64 {
  repeated MinimizedTimeSeriesFixed64 timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
// ReadRequest represents a remote read request.
message ReadRequest {
repeated Query queries = 1;

File diff suppressed because it is too large Load diff

View file

@ -144,6 +144,43 @@ message MinimizedTimeSeries {
// TODO: add metadata
}
// based on an experiment by marco
message MinimizedTimeSeriesFixed32 {
  // Sorted list of label name-value pair references. This list's len is always multiple of 4,
  // packing tuples of (label name offset, label name length, label value offset, label value length).
  repeated fixed32 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
// based on an experiment by marco
message MinimizedTimeSeriesFixed64 {
  // Sorted list of label name-value pair references; each fixed64 uses the
  // packRef64 layout (offset in the high 32 bits, length in the low 32 bits).
  repeated fixed64 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
// based on an experiment by marco
message MinimizedTimeSeriesPacking {
  // Sorted list of label name-value pair references; each fixed32 uses the
  // packRef layout (offset and length packed into one 32-bit value).
  repeated fixed32 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
message Label {
string name = 1;
string value = 2;

View file

@ -10,10 +10,10 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255
fi
if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
echo "could not find protoc 3.21.12, is it installed + in PATH?"
exit 255
fi
#if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
# echo "could not find protoc 3.21.12, is it installed + in PATH?"
# exit 255
#fi
# Since we run go install, go mod download, the go.sum will change.
# Make a backup.

View file

@ -7,8 +7,10 @@ trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
declare -a INSTANCES
# (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags)
INSTANCES+=('sender-v1;;receiver-v1;')
INSTANCES+=('sender-v11;--enable-feature rw-1-1-sender;receiver-v11;--enable-feature rw-1-1-receiver')
INSTANCES+=('sender-v11;--remote-write-format 1;receiver-v11;--remote-write-format 1')
INSTANCES+=('sender-v11-min32-optimized-varint;--remote-write-format 2;receiver-v11-min32-optimized-varint;--remote-write-format 2')
INSTANCES+=('sender-v11-min64-fixed;--remote-write-format 3;receiver-v11-min64-fixed;--remote-write-format 3')
INSTANCES+=('sender-v11-min32-fixed;--remote-write-format 4;receiver-v11-min32-fixed;--remote-write-format 4')
# ~~~~~~~~~~~~~
# append two ports to all instances

View file

@ -1,5 +1,5 @@
global:
scrape_interval: 15s
scrape_interval: 5s
external_labels:
role: ${SENDER_NAME}
@ -8,6 +8,8 @@ remote_write:
name: ${RECEIVER_NAME}
metadata_config:
send: false
queue_config:
max_samples_per_send: 5000
scrape_configs:
${SCRAPE_CONFIGS}

View file

@ -81,11 +81,11 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
remoteName string // Used to differentiate clients in metrics.
urlString string // url.String()
remoteWrite11 bool // For write clients, ignored for read clients.
Client *http.Client
timeout time.Duration
remoteName string // Used to differentiate clients in metrics.
urlString string // url.String()
rwFormat RemoteWriteFormat // For write clients, ignored for read clients.
Client *http.Client
timeout time.Duration
retryOnRateLimit bool
@ -96,14 +96,14 @@ type Client struct {
// ClientConfig configures a client.
type ClientConfig struct {
URL *config_util.URL
RemoteWrite11 bool
Timeout model.Duration
HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig
Headers map[string]string
RetryOnRateLimit bool
URL *config_util.URL
RemoteWriteFormat RemoteWriteFormat
Timeout model.Duration
HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig
Headers map[string]string
RetryOnRateLimit bool
}
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@ -165,7 +165,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
httpClient.Transport = otelhttp.NewTransport(t)
return &Client{
remoteWrite11: conf.RemoteWrite11,
rwFormat: conf.RemoteWriteFormat,
remoteName: name,
urlString: conf.URL.String(),
Client: httpClient,
@ -211,11 +211,11 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Set("User-Agent", UserAgent)
// Set the right header if we're using v1.1 remote write protocol
if c.remoteWrite11 {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
} else {
if c.rwFormat == Base1 {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
} else {
// Set the right header if we're using v1.1 remote write protocol
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
}
if attempt > 0 {

View file

@ -794,6 +794,24 @@ func labelsToUint32Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []u
return result
}
// labelsToUint64Slice interns every label name and value of lbls in
// symbolTable and appends the packed 64-bit references (name ref, then value
// ref, per label) to buf, reusing buf's backing array. The result is returned.
func labelsToUint64Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint64) []uint64 {
	refs := buf[:0]
	lbls.Range(func(l labels.Label) {
		refs = append(refs, symbolTable.Ref64Packed(l.Name), symbolTable.Ref64Packed(l.Value))
	})
	return refs
}
// labelsToUint32Packed interns every label name and value of lbls in
// symbolTable and appends the packed 32-bit references (name ref, then value
// ref, per label) to buf, reusing buf's backing array. The result is returned.
func labelsToUint32Packed(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint32) []uint32 {
	refs := buf[:0]
	lbls.Range(func(l labels.Label) {
		refs = append(refs, symbolTable.Ref32Packed(l.Name), symbolTable.Ref32Packed(l.Value))
	})
	return refs
}
func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
ls := labels.NewScratchBuilder(len(minLabels) / 2)
@ -817,6 +835,23 @@ func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
return ls.Labels()
}
// Uint64RefToLabels rebuilds a labels.Labels value from packed 64-bit symbol
// references. minLabels holds (name ref, value ref) pairs; each ref packs an
// offset (high 32 bits) and length (low 32 bits) into the symbols string (see
// unpackRef64/packRef64). A trailing unpaired reference — which would indicate
// malformed input — is ignored instead of triggering an index-out-of-range
// panic, which matters since this decodes data received over the network.
func Uint64RefToLabels(symbols string, minLabels []uint64) labels.Labels {
	ls := labels.NewScratchBuilder(len(minLabels) / 2)

	// Iterate pair-wise; the +1 bound guards against an odd-length slice.
	for labelIdx := 0; labelIdx+1 < len(minLabels); labelIdx += 2 {
		// todo, check for overflow (offset+length beyond len(symbols))?
		offset, length := unpackRef64(minLabels[labelIdx])
		name := symbols[offset : offset+length]
		// todo, check for overflow?
		offset, length = unpackRef64(minLabels[labelIdx+1])
		value := symbols[offset : offset+length]
		ls.Add(name, value)
	}

	return ls.Labels()
}
// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType {
mt := strings.ToUpper(string(t))
@ -923,6 +958,44 @@ func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, er
return &req, nil
}
// DecodeMinimizedWriteRequestFixed64 reads a snappy-compressed,
// protobuf-encoded MinimizedWriteRequestFixed64 from r and returns the
// decoded request.
func DecodeMinimizedWriteRequestFixed64(r io.Reader) (*prompb.MinimizedWriteRequestFixed64, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}

	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}

	req := &prompb.MinimizedWriteRequestFixed64{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
// DecodeMinimizedWriteRequestFixed32 reads a snappy-compressed,
// protobuf-encoded MinimizedWriteRequestFixed32 from r and returns the
// decoded request.
func DecodeMinimizedWriteRequestFixed32(r io.Reader) (*prompb.MinimizedWriteRequestFixed32, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}

	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}

	req := &prompb.MinimizedWriteRequestFixed32{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) {
req := &prompb.WriteRequest{
Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
@ -952,6 +1025,44 @@ func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (
return req, nil
}
// min64WriteRequestToWriteRequest is a test helper that expands a
// MinimizedWriteRequestFixed64 back into the classic WriteRequest form by
// resolving the packed label references against the request's symbols table.
// Exemplar contents are not converted yet (TODO below); only their count is
// preserved as zero-valued placeholder entries.
func min64WriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequestFixed64) (*prompb.WriteRequest, error) {
	req := &prompb.WriteRequest{
		Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
		//Metadata: redReq.Metadata,
	}

	for i, rts := range redReq.Timeseries {
		lbls := Uint64RefToLabels(redReq.Symbols, rts.LabelSymbols)
		ls := make([]prompb.Label, len(lbls))
		for j, l := range lbls {
			ls[j] = prompb.Label{Name: l.Name, Value: l.Value}
		}

		// TODO: convert exemplar labels/values once the minimized formats
		// carry exemplars; for now keep the count with zero-valued entries.
		exemplars := make([]prompb.Exemplar, len(rts.Exemplars))

		req.Timeseries[i].Labels = ls
		req.Timeseries[i].Samples = rts.Samples
		req.Timeseries[i].Exemplars = exemplars
		req.Timeseries[i].Histograms = rts.Histograms
	}
	return req, nil
}
// for use with minimized remote write proto format
func packRef(offset, length int) uint32 {
return uint32((offset&0xFFFFF)<<12 | (length & 0xFFF))
@ -960,3 +1071,12 @@ func packRef(offset, length int) uint32 {
// unpackRef splits a packed 32-bit symbol reference into its byte offset
// (upper 20 bits) and length (lower 12 bits); the inverse of packRef.
func unpackRef(ref uint32) (offset, length int) {
	const (
		lengthBits = 12
		lengthMask = 0xFFF
		offsetMask = 0xFFFFF
	)
	offset = int(ref>>lengthBits) & offsetMask
	length = int(ref & lengthMask)
	return offset, length
}
// packRef64 packs a symbol's byte offset and length into one uint64 for the
// minimized remote write proto format: offset in the high 32 bits, length in
// the low 32 bits. The inverse is unpackRef64.
func packRef64(offset, length uint32) uint64 {
	return uint64(offset)<<32 | uint64(length)
}
// unpackRef64 splits a packed 64-bit symbol reference into its byte offset
// (high 32 bits) and length (low 32 bits); the inverse of packRef64.
func unpackRef64(ref uint64) (offset, length uint32) {
	offset = uint32(ref >> 32)
	length = uint32(ref) // truncation keeps only the low 32 bits
	return offset, length
}

View file

@ -107,6 +107,39 @@ var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
}
}()
// writeRequestMinimized64Fixture is a two-series MinimizedWriteRequestFixed64
// used by the codec tests; both series share the same packed label references.
var writeRequestMinimized64Fixture = func() *prompb.MinimizedWriteRequestFixed64 {
	table := newRwSymbolTable()
	// Intern the name/value strings in order; the packed refs encode offsets
	// into the symbols string, so insertion order matters.
	var refs []uint64
	for _, sym := range []string{
		"__name__", "test_metric1",
		"b", "c",
		"baz", "qux",
		"d", "e",
		"foo", "bar",
	} {
		refs = append(refs, table.Ref64Packed(sym))
	}
	return &prompb.MinimizedWriteRequestFixed64{
		Timeseries: []prompb.MinimizedTimeSeriesFixed64{
			{
				LabelSymbols: refs,
				Samples:      []prompb.Sample{{Value: 1, Timestamp: 0}},
				Exemplars:    []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
				Histograms:   []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())},
			},
			{
				LabelSymbols: refs,
				Samples:      []prompb.Sample{{Value: 2, Timestamp: 1}},
				Exemplars:    []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
				Histograms:   []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())},
			},
		},
		Symbols: table.LabelsString(),
	}
}()
func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct {
input []prompb.Label
@ -568,13 +601,6 @@ func TestDecodeMinWriteRequest(t *testing.T) {
require.Equal(t, writeRequestMinimizedFixture, actual)
}
// TestMinimizedWriteRequestToWriteRequest checks that expanding the minimized
// fixture yields exactly the classic write-request fixture.
func TestMinimizedWriteRequestToWriteRequest(t *testing.T) {
	got, err := MinimizedWriteRequestToWriteRequest(writeRequestMinimizedFixture)
	require.NoError(t, err)
	require.Equal(t, writeRequestFixture, got)
}
func TestNilHistogramProto(t *testing.T) {
// This function will panic if it impromperly handles nil
// values, causing the test to fail.
@ -893,3 +919,16 @@ func (c *mockChunkIterator) Next() bool {
// Err always reports no error: the mock iterator cannot fail.
func (c *mockChunkIterator) Err() error {
	return nil
}
// TestPackRef64 verifies that 64-bit refs handed out by the symbol table
// unpack to offset/length ranges that recover the original strings.
func TestPackRef64(t *testing.T) {
	table := newRwSymbolTable()
	const (
		name  = "__name__"
		value = "asdfasd"
	)

	nameOff, nameLen := unpackRef64(table.Ref64Packed(name))
	valOff, valLen := unpackRef64(table.Ref64Packed(value))

	require.Equal(t, name, string(table.symbols[nameOff:nameOff+nameLen]))
	require.Equal(t, value, string(table.symbols[valOff:valOff+valLen]))
}

View file

@ -389,6 +389,15 @@ type WriteClient interface {
Endpoint() string
}
// RemoteWriteFormat selects which remote-write protobuf wire format a sender
// or receiver uses (exposed via the --remote-write-format flag).
type RemoteWriteFormat int64

const (
	Base1          RemoteWriteFormat = iota // original map based format
	Min32Optimized                          // two 32bit varint plus marshalling optimization
	Min64Fixed                              // a single fixed64 bit value; first 32 bits are the offset, 2nd 32 bits are the length (see packRef64)
	Min32Fixed                              // two 32bit fixed, similar to optimized but not varints + no manual marshalling optimization
)
// QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher.
@ -406,7 +415,7 @@ type QueueManager struct {
watcher *wlog.Watcher
metadataWatcher *MetadataWatcher
// experimental feature, new remote write proto format
internFormat bool
rwFormat RemoteWriteFormat
clientMtx sync.RWMutex
storeClient WriteClient
@ -454,7 +463,7 @@ func NewQueueManager(
sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
internFormat bool,
rwFormat RemoteWriteFormat,
) *QueueManager {
if logger == nil {
logger = log.NewNopLogger()
@ -477,7 +486,7 @@ func NewQueueManager(
storeClient: client,
sendExemplars: enableExemplarRemoteWrite,
sendNativeHistograms: enableNativeHistogramRemoteWrite,
internFormat: internFormat,
rwFormat: rwFormat,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
@ -1276,7 +1285,6 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
select {
case batch := <-q.batchQueue:
return batch
@ -1363,6 +1371,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
max += int(float64(max) * 0.1)
}
// TODO we should make an interface for the timeseries type
batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData {
@ -1377,6 +1386,16 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingMinimizedData[i].Samples = []prompb.Sample{{}}
}
pendingMin64Data := make([]prompb.MinimizedTimeSeriesFixed64, max)
for i := range pendingMin64Data {
pendingMin64Data[i].Samples = []prompb.Sample{{}}
}
pendingMin32Data := make([]prompb.MinimizedTimeSeriesFixed32, max)
for i := range pendingMin32Data {
pendingMin32Data[i].Samples = []prompb.Sample{{}}
}
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() {
if !timer.Stop() {
@ -1411,17 +1430,28 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok {
return
}
if s.qm.internFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
switch s.qm.rwFormat {
case Base1:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case Min64Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed64(&symbolTable, batch, pendingMin64Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMin64Samples(ctx, pendingMin64Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
case Min32Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed32(&symbolTable, batch, pendingMin32Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMin32Samples(ctx, pendingMin32Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
}
queue.ReturnForReuse(batch)
stop()
@ -1430,18 +1460,27 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C:
batch := queue.Batch()
if len(batch) > 0 {
if s.qm.internFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
switch s.qm.rwFormat {
case Base1:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case Min64Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed64(&symbolTable, batch, pendingMin64Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMin64Samples(ctx, pendingMin64Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
}
}
queue.ReturnForReuse(batch)
@ -1514,6 +1553,30 @@ func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedT
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
// sendMin64Samples marshals the pending fixed64-format series plus the shared
// symbols string into a MinimizedWriteRequestFixed64, sends it with
// retry/backoff, and records send metrics including the elapsed time.
func (s *shards) sendMin64Samples(ctx context.Context, samples []prompb.MinimizedTimeSeriesFixed64, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
	begin := time.Now()
	// Build the MinimizedWriteRequestFixed64 with no metadata.
	// Failing to build the write request is non-recoverable, since it will
	// only error if marshaling the proto to bytes fails.
	req, highest, err := buildMinimizedWriteRequestFixed64(samples, labels, pBuf, buf)
	if err == nil {
		err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
	}
	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
// sendMin32Samples marshals the pending fixed32-format series plus the shared
// symbols string into a MinimizedWriteRequestFixed32, sends it with
// retry/backoff, and records send metrics including the elapsed time.
func (s *shards) sendMin32Samples(ctx context.Context, samples []prompb.MinimizedTimeSeriesFixed32, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
	begin := time.Now()
	// Build the MinimizedWriteRequestFixed32 with no metadata.
	// Failing to build the write request is non-recoverable, since it will
	// only error if marshaling the proto to bytes fails.
	req, highest, err := buildMinimizedWriteRequestFixed32(samples, labels, pBuf, buf)
	if err == nil {
		err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
	}
	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) {
if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
@ -1638,6 +1701,156 @@ func populateMinimizedTimeSeries(symbolTable *rwSymbolTable, batch []timeSeries,
return nPendingSamples, nPendingExemplars, nPendingHistograms
}
// populateMinimizedTimeSeriesFixed64 fills pendingData[0:len(batch)] from
// batch, interning each series' label strings in symbolTable and storing the
// packed 64-bit references. It returns the counts of samples, exemplars and
// histograms staged; exemplars are not handled yet, so that count stays zero.
func populateMinimizedTimeSeriesFixed64(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesFixed64, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var nPendingSamples, nPendingExemplars, nPendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}

		// Number of pending samples is limited by the fact that sendSamples
		// (via sendSamplesWithBackoff) retries endlessly, so once we reach max
		// samples, if we can never send to the endpoint we'll stop reading
		// from the queue. This makes it safe to reference pending data by index.
		ts.LabelSymbols = labelsToUint64Slice(d.seriesLabels, symbolTable, ts.LabelSymbols)

		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			nPendingSamples++
		// TODO: handle all types — tExemplar is not converted yet; see the
		// reduced-format implementation for the intended interning approach.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			nPendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			nPendingHistograms++
		}
	}
	return nPendingSamples, nPendingExemplars, nPendingHistograms
}
// populateMinimizedTimeSeriesFixed32 fills pendingData[0:len(batch)] from
// batch, interning each series' label strings in symbolTable and storing the
// 32-bit references. It returns the counts of samples, exemplars and
// histograms staged; exemplars are not handled yet, so that count stays zero.
func populateMinimizedTimeSeriesFixed32(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesFixed32, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var nPendingSamples, nPendingExemplars, nPendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}

		// Number of pending samples is limited by the fact that sendSamples
		// (via sendSamplesWithBackoff) retries endlessly, so once we reach max
		// samples, if we can never send to the endpoint we'll stop reading
		// from the queue. This makes it safe to reference pending data by index.
		ts.LabelSymbols = labelsToUint32Slice(d.seriesLabels, symbolTable, ts.LabelSymbols)

		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			nPendingSamples++
		// TODO: handle all types — tExemplar is not converted yet; see the
		// reduced-format implementation for the intended interning approach.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			nPendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			nPendingHistograms++
		}
	}
	return nPendingSamples, nPendingExemplars, nPendingHistograms
}
// populateMinimizedTimeSeriesPacking fills pendingData[0:len(batch)] from
// batch, interning each series' label strings in symbolTable and storing the
// packed 32-bit references. It returns the counts of samples, exemplars and
// histograms staged; exemplars are not handled yet, so that count stays zero.
func populateMinimizedTimeSeriesPacking(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesPacking, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var nPendingSamples, nPendingExemplars, nPendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}

		// Number of pending samples is limited by the fact that sendSamples
		// (via sendSamplesWithBackoff) retries endlessly, so once we reach max
		// samples, if we can never send to the endpoint we'll stop reading
		// from the queue. This makes it safe to reference pending data by index.
		ts.LabelSymbols = labelsToUint32Packed(d.seriesLabels, symbolTable, ts.LabelSymbols)

		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			nPendingSamples++
		// TODO: handle all types — tExemplar is not converted yet; see the
		// reduced-format implementation for the intended interning approach.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			nPendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			nPendingHistograms++
		}
	}
	return nPendingSamples, nPendingExemplars, nPendingHistograms
}
func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error {
backoff := cfg.MinBackoff
sleepDuration := model.Duration(0)
@ -1742,13 +1955,17 @@ type offLenPair struct {
}
// rwSymbolTable interns label strings into a single byte buffer and records,
// per string, a reference in one of three encodings: an explicit
// offset/length pair, a packed uint64, or a packed uint32.
//
// NOTE: the original span contained diff residue — the symbols/symbolsMap
// fields were duplicated, which does not compile. Each field is declared
// exactly once here.
type rwSymbolTable struct {
	symbols            []byte
	symbolsMap         map[string]offLenPair
	symbolsMap64Packed map[string]uint64
	symbolsMap32Packed map[string]uint32
}
// newRwSymbolTable returns an empty symbol table ready for interning. All
// three reference maps are allocated so the Ref* methods can write to them
// without nil checks. (The original span duplicated the symbolsMap key —
// diff residue — which does not compile; it appears once here.)
func newRwSymbolTable() rwSymbolTable {
	return rwSymbolTable{
		symbolsMap:         make(map[string]offLenPair),
		symbolsMap64Packed: make(map[string]uint64),
		symbolsMap32Packed: make(map[string]uint32),
	}
}
@ -1757,11 +1974,41 @@ func (r *rwSymbolTable) Ref(str string) (off uint32, leng uint32) {
return offlen.Off, offlen.Len
}
off, leng = uint32(len(r.symbols)), uint32(len(str))
if int(off) > len(r.symbols) {
panic(1)
}
r.symbols = append(r.symbols, str...)
if len(r.symbols) < int(off+leng) {
panic(2)
}
r.symbolsMap[str] = offLenPair{off, leng}
return
}
// Ref64Packed interns str into the symbol buffer and returns its
// (offset, length) reference packed into a single uint64 via packRef64.
// Repeated calls with the same string return the cached reference.
//
// Panics if the symbol buffer or the string would push either the offset or
// the length past the uint32 range the packed format can represent.
func (r *rwSymbolTable) Ref64Packed(str string) uint64 {
	if ref, ok := r.symbolsMap64Packed[str]; ok {
		return ref
	}
	// Offset and length are each stored as a uint32 inside the packed
	// uint64; refuse anything that would overflow either field.
	if len(r.symbols) > math.MaxUint32 || len(str) > math.MaxUint32 || len(str)+len(r.symbols) > math.MaxUint32 {
		panic("rwSymbolTable: symbol data exceeds uint32 range")
	}
	// Compute once and reuse; the original returned via a second map lookup.
	ref := packRef64(uint32(len(r.symbols)), uint32(len(str)))
	r.symbolsMap64Packed[str] = ref
	r.symbols = append(r.symbols, str...)
	return ref
}
// Ref32Packed interns str into the symbol buffer and returns its
// (offset, length) reference packed into a single uint32 via packRef.
// Repeated calls with the same string return the cached reference.
//
// todo: check for overflowing the uint32 based on documented format?
// (The exact offset/length bit split of the packed 32-bit format is not
// visible here, so no range check is added yet — confirm against the
// format spec before relying on large symbol tables.)
func (r *rwSymbolTable) Ref32Packed(str string) uint32 {
	if ref, ok := r.symbolsMap32Packed[str]; ok {
		return ref
	}
	// Compute once and reuse; the original returned via a second map lookup.
	ref := packRef(len(r.symbols), len(str))
	r.symbolsMap32Packed[str] = ref
	r.symbols = append(r.symbols, str...)
	return ref
}
// LabelsString exposes the interned symbol buffer as a string without
// copying, via an unsafe cast of the byte slice header. The returned string
// aliases r.symbols: it is only valid until the next append to, or clear()
// of, the symbol table, and must not be retained past that point.
func (r *rwSymbolTable) LabelsString() string {
	return *((*string)(unsafe.Pointer(&r.symbols)))
}
@ -1770,6 +2017,12 @@ func (r *rwSymbolTable) clear() {
for k := range r.symbolsMap {
delete(r.symbolsMap, k)
}
for k := range r.symbolsMap64Packed {
delete(r.symbolsMap64Packed, k)
}
for k := range r.symbolsMap32Packed {
delete(r.symbolsMap32Packed, k)
}
r.symbols = r.symbols[:0]
}
@ -1817,3 +2070,141 @@ func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels str
}
return compressed, highest, nil
}
// buildMinimizedWriteRequestFixed64 marshals samples plus the interned label
// symbols into a MinimizedWriteRequestFixed64, snappy-compresses the result,
// and returns the compressed payload together with the highest timestamp
// observed across all samples, exemplars, and histograms.
//
// pBuf and buf are optional reusable buffers; pass nil for convenience (e.g.
// in tests) at the cost of extra allocation.
func buildMinimizedWriteRequestFixed64(samples []prompb.MinimizedTimeSeriesFixed64, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestFixed64{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	if err := pBuf.Marshal(req); err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	// buf is guaranteed non-nil by now (the else branch above assigns it),
	// so the original's `buf != nil` guard here was dead; grow the buffer
	// for next time if snappy needed more room than we supplied.
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}
// buildMinimizedWriteRequestFixed32 marshals samples plus the interned label
// symbols into a MinimizedWriteRequestFixed32, snappy-compresses the result,
// and returns the compressed payload together with the highest timestamp
// observed across all samples, exemplars, and histograms.
//
// pBuf and buf are optional reusable buffers; pass nil for convenience (e.g.
// in tests) at the cost of extra allocation.
func buildMinimizedWriteRequestFixed32(samples []prompb.MinimizedTimeSeriesFixed32, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestFixed32{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	if err := pBuf.Marshal(req); err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	// buf is guaranteed non-nil by now (the else branch above assigns it),
	// so the original's `buf != nil` guard here was dead; grow the buffer
	// for next time if snappy needed more room than we supplied.
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}
// buildMinimizedWriteRequestPacking marshals samples plus the interned label
// symbols into a MinimizedWriteRequestPacking, snappy-compresses the result,
// and returns the compressed payload together with the highest timestamp
// observed across all samples, exemplars, and histograms.
//
// pBuf and buf are optional reusable buffers; pass nil for convenience (e.g.
// in tests) at the cost of extra allocation.
func buildMinimizedWriteRequestPacking(samples []prompb.MinimizedTimeSeriesPacking, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestPacking{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	if err := pBuf.Marshal(req); err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	// buf is guaranteed non-nil by now (the else branch above assigns it),
	// so the original's `buf != nil` guard here was dead; grow the buffer
	// for next time if snappy needed more room than we supplied.
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}

View file

@ -67,19 +67,25 @@ func TestSampleDelivery(t *testing.T) {
exemplars bool
histograms bool
floatHistograms bool
remoteWrite11 bool
rwFormat RemoteWriteFormat
}{
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
{samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
//{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
//{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
//{samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
//{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
//{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
{remoteWrite11: true, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
{remoteWrite11: true, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
{remoteWrite11: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
{rwFormat: Min32Optimized, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
//{rwFormat: Min32Optimized, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
//{rwFormat: Min32Optimized, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
//{rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
//{rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
//{rwFormat: Min64Fixed, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
//{rwFormat: Min64Fixed, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
//{rwFormat: Min64Fixed, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
//{rwFormat: Min64Fixed, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
//{rwFormat: Min64Fixed, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
}
// Let's create an even number of send batches so we don't run into the
@ -106,30 +112,30 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.rwFormat)
defer s.Close()
var (
series []record.RefSeries
samples []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
series []record.RefSeries
samples []record.RefSample
//exemplars []record.RefExemplar
//histograms []record.RefHistogramSample
//floatHistograms []record.RefFloatHistogramSample
)
// Generates same series in both cases.
if tc.samples {
samples, series = createTimeseries(n, n)
}
if tc.exemplars {
exemplars, series = createExemplars(n, n)
}
if tc.histograms {
histograms, _, series = createHistograms(n, n, false)
}
if tc.floatHistograms {
_, floatHistograms, series = createHistograms(n, n, true)
}
//if tc.exemplars {
// exemplars, series = createExemplars(n, n)
//}
//if tc.histograms {
// histograms, _, series = createHistograms(n, n, false)
//}
//if tc.floatHistograms {
// _, floatHistograms, series = createHistograms(n, n, true)
//}
// Apply new config.
queueConfig.Capacity = len(samples)
@ -139,38 +145,38 @@ func TestSampleDelivery(t *testing.T) {
require.NoError(t, err)
qm := s.rws.queues[hash]
c := NewTestWriteClient(tc.remoteWrite11)
c := NewTestWriteClient(tc.rwFormat)
qm.SetClient(c)
qm.StoreSeries(series, 0)
// Send first half of data.
c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series)
c.expectHistograms(histograms[:len(histograms)/2], series)
c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
//c.expectExemplars(exemplars[:len(exemplars)/2], series)
//c.expectHistograms(histograms[:len(histograms)/2], series)
//c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
qm.Append(samples[:len(samples)/2])
qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
//qm.AppendExemplars(exemplars[:len(exemplars)/2])
//qm.AppendHistograms(histograms[:len(histograms)/2])
//qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
c.waitForExpectedData(t)
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series)
c.expectHistograms(histograms[len(histograms)/2:], series)
c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
//c.expectExemplars(exemplars[len(exemplars)/2:], series)
//c.expectHistograms(histograms[len(histograms)/2:], series)
//c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
qm.Append(samples[len(samples)/2:])
qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
//qm.AppendExemplars(exemplars[len(exemplars)/2:])
//qm.AppendHistograms(histograms[len(histograms)/2:])
//qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
c.waitForExpectedData(t)
})
}
}
func TestMetadataDelivery(t *testing.T) {
c := NewTestWriteClient(false)
c := NewTestWriteClient(Base1)
dir := t.TempDir()
@ -178,7 +184,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, 0)
m.Start()
defer m.Stop()
@ -204,13 +210,13 @@ func TestMetadataDelivery(t *testing.T) {
}
func TestSampleDeliveryTimeout(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
//remoteWrite11 := proto == "1.1"
// Let's send one less sample than batch size, and wait the timeout duration
n := 9
samples, series := createTimeseries(n, n)
c := NewTestWriteClient(remoteWrite11)
c := NewTestWriteClient(rwFormat)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
@ -220,7 +226,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@ -238,9 +244,8 @@ func TestSampleDeliveryTimeout(t *testing.T) {
}
func TestSampleDeliveryOrder(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n)
@ -258,7 +263,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
})
}
c := NewTestWriteClient(remoteWrite11)
c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series)
dir := t.TempDir()
@ -267,7 +272,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@ -289,7 +294,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
@ -327,7 +332,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for i := 0; i < numSegments; i++ {
series := []record.RefSeries{}
for j := 0; j < numSeries; j++ {
@ -341,15 +346,14 @@ func TestSeriesReset(t *testing.T) {
}
func TestReshard(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
size := 10 // Make bigger to find more races.
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries)
c := NewTestWriteClient(remoteWrite11)
c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig
@ -359,7 +363,7 @@ func TestReshard(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@ -385,10 +389,9 @@ func TestReshard(t *testing.T) {
}
func TestReshardRaceWithStop(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
c := NewTestWriteClient(remoteWrite11)
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
c := NewTestWriteClient(rwFormat)
var m *QueueManager
h := sync.Mutex{}
@ -400,7 +403,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() {
for {
metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start()
h.Unlock()
h.Lock()
@ -425,9 +428,8 @@ func TestReshardRaceWithStop(t *testing.T) {
}
func TestReshardPartialBatch(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
samples, series := createTimeseries(1, 10)
c := NewTestBlockedWriteClient()
@ -440,7 +442,7 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@ -472,9 +474,8 @@ func TestReshardPartialBatch(t *testing.T) {
// where a large scrape (> capacity + max samples per send) is appended at the
// same time as a batch times out according to the batch send deadline.
func TestQueueFilledDeadlock(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
samples, series := createTimeseries(50, 1)
c := NewNopWriteClient()
@ -490,7 +491,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@ -515,14 +516,14 @@ func TestQueueFilledDeadlock(t *testing.T) {
}
func TestReleaseNoninternedString(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1"
for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient(remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
c := NewTestWriteClient(rwFormat)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start()
defer m.Stop()
@ -570,8 +571,9 @@ func TestShouldReshard(t *testing.T) {
mcfg := config.DefaultMetadataConfig
for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient(false)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
// todo: test with new proto type(s)
client := NewTestWriteClient(Base1)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut)
@ -706,16 +708,16 @@ type TestWriteClient struct {
wg sync.WaitGroup
mtx sync.Mutex
buf []byte
expectRemoteWrite11 bool
rwFormat RemoteWriteFormat
}
func NewTestWriteClient(expectRemoteWrite11 bool) *TestWriteClient {
func NewTestWriteClient(rwFormat RemoteWriteFormat) *TestWriteClient {
return &TestWriteClient{
withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{},
expectRemoteWrite11: expectRemoteWrite11,
withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{},
rwFormat: rwFormat,
}
}
@ -803,6 +805,7 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
c.mtx.Lock()
defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
}
@ -831,25 +834,33 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
}
var reqProto *prompb.WriteRequest
if c.expectRemoteWrite11 {
var reqReduced prompb.MinimizedWriteRequest
err = proto.Unmarshal(reqBuf, &reqReduced)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqReduced)
}
} else {
switch c.rwFormat {
case Base1:
reqProto = &prompb.WriteRequest{}
err = proto.Unmarshal(reqBuf, reqProto)
case Min32Optimized:
var reqMin prompb.MinimizedWriteRequest
err = proto.Unmarshal(reqBuf, &reqMin)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqMin)
}
case Min64Fixed:
var reqMin64 prompb.MinimizedWriteRequestFixed64
err = proto.Unmarshal(reqBuf, &reqMin64)
if err == nil {
reqProto, err = min64WriteRequestToWriteRequest(&reqMin64)
}
}
if err != nil {
fmt.Println("error: ", err)
return err
}
count := 0
for _, ts := range reqProto.Timeseries {
labels := labelProtosToLabels(ts.Labels)
seriesName := labels.Get("__name__")
ls := labelProtosToLabels(ts.Labels)
seriesName := ls.Get("__name__")
for _, sample := range ts.Samples {
count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
@ -860,12 +871,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
}
for _, histogram := range ts.Histograms {
for _, hist := range ts.Histograms {
count++
if histogram.IsFloatHistogram() {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram)
if hist.IsFloatHistogram() {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], hist)
} else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], hist)
}
}
@ -965,7 +976,8 @@ func BenchmarkSampleSend(b *testing.B) {
dir := b.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.StoreSeries(series, 0)
// These should be received by the client.
@ -1009,9 +1021,10 @@ func BenchmarkStartup(b *testing.B) {
for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient()
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false)
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
@ -1094,7 +1107,8 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
// Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual
@ -1163,7 +1177,7 @@ func TestCalculateDesiredShards(t *testing.T) {
}
func TestCalculateDesiredShardsDetail(t *testing.T) {
c := NewTestWriteClient(false)
c := NewTestWriteClient(Base1)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
@ -1171,7 +1185,8 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for _, tc := range []struct {
name string
@ -1531,3 +1546,138 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) {
})
}
}
// BenchmarkBuildMinimizedWriteRequestFixed32 measures serialization cost of
// the fixed32 minimized format for batches of 2, 10, and 100 series, and
// reports the average compressed payload size per operation.
func BenchmarkBuildMinimizedWriteRequestFixed32(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesFixed32, len(tc.batch))
		for i := range seriesBuff {
			seriesBuff[i].Samples = []prompb.Sample{{}}
			// todo: add other types (e.g. exemplars).
		}

		// Warm up the buffers so steady-state allocation behavior is measured.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesFixed32(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestFixed32(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			// Reset once before the loop: the original called b.ResetTimer()
			// inside the b.N loop, which discards the time of every iteration
			// but the last and makes the reported ns/op meaningless.
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				populateMinimizedTimeSeriesFixed32(&symbolTable, tc.batch, seriesBuff, true, true)
				req, _, err := buildMinimizedWriteRequestFixed32(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report once after the loop (the original recomputed this every
			// iteration).
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}
// BenchmarkBuildMinimizedWriteRequestFixed64 measures serialization of dummy
// time-series batches into the fixed64 minimized remote-write format and
// reports the resulting payload size per operation.
func BenchmarkBuildMinimizedWriteRequestFixed64(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesFixed64, len(tc.batch))
		for i := range seriesBuff {
			seriesBuff[i].Samples = []prompb.Sample{{}}
			// todo: add other types (exemplars, histograms) once supported.
		}

		// Warm up the buffers so we benchmark steady-state reuse, not growth.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesFixed64(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestFixed64(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				// Keep per-iteration setup out of the timed region. Calling
				// b.ResetTimer() here (as before) would discard all but the
				// last iteration's time.
				b.StopTimer()
				populateMinimizedTimeSeriesFixed64(&symbolTable, tc.batch, seriesBuff, true, true)
				b.StartTimer()
				req, _, err := buildMinimizedWriteRequestFixed64(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report once with the complete total; reporting inside the loop
			// repeatedly overwrote the metric with a partial average.
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}
// BenchmarkBuildMinimizedWriteRequestPacking measures serialization of dummy
// time-series batches into the packed minimized remote-write format and
// reports the resulting payload size per operation.
func BenchmarkBuildMinimizedWriteRequestPacking(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesPacking, len(tc.batch))
		for i := range seriesBuff {
			seriesBuff[i].Samples = []prompb.Sample{{}}
			// todo: add other types (exemplars, histograms) once supported.
		}

		// Warm up the buffers so we benchmark steady-state reuse, not growth.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesPacking(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestPacking(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				// Keep per-iteration setup out of the timed region. Calling
				// b.ResetTimer() here (as before) would discard all but the
				// last iteration's time.
				b.StopTimer()
				populateMinimizedTimeSeriesPacking(&symbolTable, tc.batch, seriesBuff, true, true)
				b.StartTimer()
				req, _, err := buildMinimizedWriteRequestPacking(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report once with the complete total; reporting inside the loop
			// repeatedly overwrote the metric with a partial average.
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}

View file

@ -91,7 +91,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases {
t.Run("", func(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)?
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs,

View file

@ -62,7 +62,7 @@ type Storage struct {
}
// NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *Storage {
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *Storage {
if l == nil {
l = log.NewNopLogger()
}
@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger,
localStartTimeCallback: stCallback,
}
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11)
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, rwFormat)
return s
}

View file

@ -27,7 +27,8 @@ import (
func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -54,7 +55,8 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
@ -75,7 +77,8 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
@ -100,7 +103,8 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{

View file

@ -65,7 +65,7 @@ type WriteStorage struct {
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
remoteWrite11 bool
rwFormat RemoteWriteFormat
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool
@ -77,13 +77,13 @@ type WriteStorage struct {
}
// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *WriteStorage {
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *WriteStorage {
if logger == nil {
logger = log.NewNopLogger()
}
rws := &WriteStorage{
queues: make(map[string]*QueueManager),
remoteWrite11: remoteWrite11,
rwFormat: rwFormat,
watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger,
@ -156,14 +156,14 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
}
c, err := NewWriteClient(name, &ClientConfig{
URL: rwConf.URL,
RemoteWrite11: rws.remoteWrite11,
Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig,
Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
URL: rwConf.URL,
RemoteWriteFormat: rws.rwFormat,
Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig,
Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
})
if err != nil {
return err
@ -200,7 +200,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper,
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
rws.remoteWrite11,
rws.rwFormat,
)
// Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash)

View file

@ -46,17 +46,17 @@ type writeHandler struct {
// Experimental feature, new remote write proto format
// The handler will accept the new format, but it can still accept the old one
enableRemoteWrite11 bool
// TODO: this should eventually be via content negotiation
rwFormat RemoteWriteFormat
}
// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool) http.Handler {
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, rwFormat RemoteWriteFormat) http.Handler {
h := &writeHandler{
logger: logger,
appendable: appendable,
enableRemoteWrite11: enableRemoteWrite11,
logger: logger,
appendable: appendable,
rwFormat: rwFormat,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
@ -74,11 +74,19 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
var req *prompb.WriteRequest
var reqMin *prompb.MinimizedWriteRequest
var reqMin64Fixed *prompb.MinimizedWriteRequestFixed64
var reqMin32Fixed *prompb.MinimizedWriteRequestFixed32
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
reqMin, err = DecodeMinimizedWriteRequest(r.Body)
} else {
// TODO: this should eventually be done via content negotiation/looking at the header
switch h.rwFormat {
case Base1:
req, err = DecodeWriteRequest(r.Body)
case Min32Optimized:
reqMin, err = DecodeMinimizedWriteRequest(r.Body)
case Min64Fixed:
reqMin64Fixed, err = DecodeMinimizedWriteRequestFixed64(r.Body)
case Min32Fixed:
reqMin32Fixed, err = DecodeMinimizedWriteRequestFixed32(r.Body)
}
if err != nil {
@ -87,11 +95,18 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
err = h.writeMin(r.Context(), reqMin)
} else {
// TODO: this should eventually be done detecting the format version above
switch h.rwFormat {
case Base1:
err = h.write(r.Context(), req)
case Min32Optimized:
err = h.writeMin(r.Context(), reqMin)
case Min64Fixed:
err = h.writeMin64(r.Context(), reqMin64Fixed)
case Min32Fixed:
err = h.writeMin32(r.Context(), reqMin32Fixed)
}
switch err {
case nil:
case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
@ -320,3 +335,81 @@ func (h *writeHandler) writeMin(ctx context.Context, req *prompb.MinimizedWriteR
return nil
}
// writeMin64 ingests a minimized write request that references label symbols
// via packed uint64 offsets. Samples, exemplars and histograms of each series
// are appended; the appender is committed on success and rolled back when any
// append fails. Out-of-order exemplars are counted and logged, not fatal.
func (h *writeHandler) writeMin64(ctx context.Context, req *prompb.MinimizedWriteRequestFixed64) (err error) {
	outOfOrderExemplarErrs := 0

	app := h.appendable.Appender(ctx)
	defer func() {
		if err == nil {
			err = app.Commit()
			return
		}
		_ = app.Rollback()
	}()

	for _, ts := range req.Timeseries {
		seriesLabels := Uint64RefToLabels(req.Symbols, ts.LabelSymbols)

		if appendErr := h.appendSamples(app, ts.Samples, seriesLabels); appendErr != nil {
			return appendErr
		}

		for _, ep := range ts.Exemplars {
			h.appendExemplar(app, exemplarProtoToExemplar(ep), seriesLabels, &outOfOrderExemplarErrs)
		}

		if appendErr := h.appendHistograms(app, ts.Histograms, seriesLabels); appendErr != nil {
			return appendErr
		}
	}

	if outOfOrderExemplarErrs > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	return nil
}
// writeMin32 ingests a minimized write request that references label symbols
// via packed uint32 offsets. Samples, exemplars and histograms of each series
// are appended; the appender is committed on success and rolled back when any
// append fails. Out-of-order exemplars are counted and logged, not fatal.
func (h *writeHandler) writeMin32(ctx context.Context, req *prompb.MinimizedWriteRequestFixed32) (err error) {
	outOfOrderExemplarErrs := 0

	app := h.appendable.Appender(ctx)
	defer func() {
		if err == nil {
			err = app.Commit()
			return
		}
		_ = app.Rollback()
	}()

	for _, ts := range req.Timeseries {
		seriesLabels := Uint32RefToLabels(req.Symbols, ts.LabelSymbols)

		if appendErr := h.appendSamples(app, ts.Samples, seriesLabels); appendErr != nil {
			return appendErr
		}

		for _, ep := range ts.Exemplars {
			h.appendExemplar(app, exemplarProtoToExemplar(ep), seriesLabels, &outOfOrderExemplarErrs)
		}

		if appendErr := h.appendHistograms(app, ts.Histograms, seriesLabels); appendErr != nil {
			return appendErr
		}
	}

	if outOfOrderExemplarErrs > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	return nil
}

View file

@ -45,7 +45,8 @@ func TestRemoteWriteHandler(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -92,7 +93,8 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
handler := NewWriteHandler(nil, nil, appendable, true)
// TODO: test with other proto format(s)
handler := NewWriteHandler(nil, nil, appendable, Min32Optimized)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -106,25 +108,124 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
// the reduced write request is equivalent to the write request fixture.
// we can use it for
for _, ts := range writeRequestFixture.Timeseries {
ls := labelProtosToLabels(ts.Labels)
labels := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples {
require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
}
}
}
//func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
// buf, _, err := buildMinimizedWriteRequest(writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil)
// require.NoError(t, err)
//
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// require.NoError(t, err)
//
// appendable := &mockAppendable{}
// handler := NewWriteHandler(nil, nil, appendable, false, true)
//
// recorder := httptest.NewRecorder()
// handler.ServeHTTP(recorder, req)
//
// resp := recorder.Result()
// require.Equal(t, http.StatusNoContent, resp.StatusCode)
//
// i := 0
// j := 0
// k := 0
// // the reduced write request is equivalent to the write request fixture.
// // we can use it for
// for _, ts := range writeRequestFixture.Timeseries {
// ls := labelProtosToLabels(ts.Labels)
// for _, s := range ts.Samples {
// require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
// i++
// }
//
// for _, e := range ts.Exemplars {
// exemplarLabels := labelProtosToLabels(e.Labels)
// require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
// j++
// }
//
// for _, hp := range ts.Histograms {
// if hp.IsFloatHistogram() {
// fh := FloatHistogramProtoToFloatHistogram(hp)
// require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
// } else {
// h := HistogramProtoToHistogram(hp)
// require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
// }
//
// k++
// }
// }
//}
func TestRemoteWriteHandler64Packed(t *testing.T) {
buf, _, err := buildMinimizedWriteRequestFixed64(writeRequestMinimized64Fixture.Timeseries, writeRequestMinimized64Fixture.Symbols, nil, nil)
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
require.NoError(t, err)
appendable := &mockAppendable{}
// TODO: test with other proto format(s)
handler := NewWriteHandler(nil, nil, appendable, Min64Fixed)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusNoContent, resp.StatusCode)
i := 0
j := 0
k := 0
// the reduced write request is equivalent to the write request fixture.
// we can use it for
for _, ts := range writeRequestMinimized64Fixture.Timeseries {
labels := Uint64RefToLabels(writeRequestMinimized64Fixture.Symbols, ts.LabelSymbols)
for _, s := range ts.Samples {
require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
@ -145,7 +246,8 @@ func TestOutOfOrderSample(t *testing.T) {
appendable := &mockAppendable{
latestSample: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -170,7 +272,8 @@ func TestOutOfOrderExemplar(t *testing.T) {
appendable := &mockAppendable{
latestExemplar: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -193,8 +296,8 @@ func TestOutOfOrderHistogram(t *testing.T) {
appendable := &mockAppendable{
latestHistogram: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -222,7 +325,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
b.ResetTimer()
@ -252,15 +356,31 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
// reqs = append(reqs, req)
// }
// appendable := &mockAppendable{}
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false)
// recorder := httptest.NewRecorder()
// b.ResetTimer()
// for _, req := range reqs {
// handler.ServeHTTP(recorder, req)
// }
// }
// TODO(npazosmendez): add benchmarks with realistic scenarios
//func BenchmarkMin64RemoteWriteHandler(b *testing.B) {
// const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
// reqs := []*http.Request{}
// for i := 0; i < b.N; i++ {
// rw := newRwSymbolTable()
// num := strings.Repeat(strconv.Itoa(i), 16)
// buf, _, err := buildMinimizedWriteRequestFixed64([]prompb.MinimizedTimeSeriesFixed64{{
// LabelSymbols: []uint64{
// rw.Ref64Packed("__name__"), rw.Ref64Packed("test_metric"),
// rw.Ref64Packed("test_label_name_" + num), rw.Ref64Packed(labelValue + num),
// },
// Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
// }}, rw.LabelsString(), nil, nil)
// require.NoError(b, err)
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// require.NoError(b, err)
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// reqs = append(reqs, req)
// }
//
// appendable := &mockAppendable{}
// // TODO: test with other proto format(s)
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
// recorder := httptest.NewRecorder()
func TestCommitErr(t *testing.T) {
buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
@ -272,7 +392,8 @@ func TestCommitErr(t *testing.T) {
appendable := &mockAppendable{
commitErr: fmt.Errorf("commit error"),
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -297,8 +418,8 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
b.Cleanup(func() {
require.NoError(b, db.Close())
})
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false)
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), Base1)
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err)

View file

@ -117,7 +117,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
}
for _, tc := range cases {
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: tc.cfgs,
@ -139,7 +140,8 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg)
require.NoError(t, err)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
@ -164,7 +166,8 @@ func TestRestartOnNameChange(t *testing.T) {
func TestUpdateWithRegisterer(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, Base1)
c1 := &config.RemoteWriteConfig{
Name: "named",
URL: &common_config.URL{
@ -204,7 +207,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
func TestWriteStorageLifecycle(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -221,7 +225,8 @@ func TestWriteStorageLifecycle(t *testing.T) {
func TestUpdateExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, Base1)
externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{
@ -250,8 +255,8 @@ func TestUpdateExternalLabels(t *testing.T) {
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -276,7 +281,8 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
// todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second),

View file

@ -88,7 +88,7 @@ func createTestAgentDB(t *testing.T, reg prometheus.Registerer, opts *Options) *
t.Helper()
dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, remote.Base1)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@ -584,7 +584,7 @@ func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, remote.Base1)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@ -604,7 +604,7 @@ func TestLockfile(t *testing.T) {
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, remote.Base1)
defer func() {
require.NoError(t, rs.Close())
}()

View file

@ -253,8 +253,8 @@ func NewAPI(
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
rwFormat remote.RemoteWriteFormat,
otlpEnabled bool,
enableRemoteWrite11 bool,
) *API {
a := &API{
QueryEngine: qe,
@ -296,7 +296,7 @@ func NewAPI(
}
if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11)
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, rwFormat)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)

View file

@ -459,9 +459,10 @@ func TestEndpoints(t *testing.T) {
dbDir := t.TempDir()
// TODO: test with other proto format(s)?
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil
}, dbDir, 1*time.Second, nil, false)
}, dbDir, 1*time.Second, nil, remote.Base1)
err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{

View file

@ -17,6 +17,7 @@ package v1
import (
"context"
"fmt"
"github.com/prometheus/prometheus/storage/remote"
"net/http"
"net/http/httptest"
"net/url"
@ -136,7 +137,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil,
nil,
false,
false,
remote.Base1,
false, // Disable experimental reduce remote write proto support.
)

20844
web/ui/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -18,6 +18,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/prometheus/prometheus/storage/remote"
"io"
stdlog "log"
"math"
@ -242,27 +243,27 @@ type Options struct {
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
AppName string
EnableReceiverRemoteWrite11 bool
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
AppName string
RemoteWriteFormat remote.RemoteWriteFormat
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
@ -323,6 +324,7 @@ func New(logger log.Logger, o *Options) *Handler {
app = h.storage
}
fmt.Println("rw format for handler is: ", o.RemoteWriteFormat)
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
@ -352,8 +354,8 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer,
nil,
o.EnableRemoteWriteReceiver,
o.RemoteWriteFormat,
o.EnableOTLPWriteReceiver,
o.EnableReceiverRemoteWrite11,
)
if o.RoutePrefix != "/" {