Merge pull request #3 from prometheus/callum-rw-format-testing

This commit is contained in:
Nicolás Pazos 2023-11-23 19:34:04 -03:00 committed by GitHub
commit f2bc16177d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
29 changed files with 1347 additions and 445 deletions

View file

@ -154,7 +154,8 @@ type flagConfig struct {
enableNewSDManager bool enableNewSDManager bool
enablePerStepStats bool enablePerStepStats bool
enableAutoGOMAXPROCS bool enableAutoGOMAXPROCS bool
enableSenderRemoteWrite11 bool // todo: how to use the enable feature flag properly + use the remote format enum type
rwFormat int
prometheusURL string prometheusURL string
corsRegexString string corsRegexString string
@ -211,11 +212,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
continue continue
case "promql-at-modifier", "promql-negative-offset": case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "rw-1-1-sender":
c.enableSenderRemoteWrite11 = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
case "rw-1-1-receiver":
c.web.EnableReceiverRemoteWrite11 = true
default: default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
} }
@ -429,6 +425,9 @@ func main() {
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList) Default("").StringsVar(&cfg.featureList)
a.Flag("remote-write-format", "remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format)").
Default("0").IntVar(&cfg.rwFormat)
promlogflag.AddFlags(a, &cfg.promlogConfig) promlogflag.AddFlags(a, &cfg.promlogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@ -601,7 +600,7 @@ func main() {
var ( var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()} localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{} scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11) remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, remote.RemoteWriteFormat(cfg.rwFormat))
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
) )
@ -725,6 +724,7 @@ func main() {
cfg.web.Flags[f.Name] = f.Value.String() cfg.web.Flags[f.Name] = f.Value.String()
} }
cfg.web.RemoteWriteFormat = remote.RemoteWriteFormat(cfg.rwFormat)
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)

View file

@ -53,6 +53,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--remote-write-format</code> | remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format) | `0` |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -57,38 +57,6 @@ func main() {
} }
}) })
http.HandleFunc("/receiveReduced", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeReducedWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, e := range ts.Exemplars {
m := make(model.Metric, len(e.Labels))
for _, l := range e.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
}
for _, hp := range ts.Histograms {
h := remote.HistogramProtoToHistogram(hp)
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
})
http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) { http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeMinimizedWriteRequest(r.Body) req, err := remote.DecodeMinimizedWriteRequest(r.Body)
if err != nil { if err != nil {
@ -97,8 +65,25 @@ func main() {
} }
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := remote.Uint32RefToLabels(req.Symbols, ts.LabelSymbols) m := make(model.Metric, len(ts.LabelSymbols)/2)
fmt.Println(ls) labelIdx := 0
for labelIdx < len(ts.LabelSymbols) {
// todo, check for overflow?
offset := ts.LabelSymbols[labelIdx]
labelIdx++
length := ts.LabelSymbols[labelIdx]
labelIdx++
name := req.Symbols[offset : offset+length]
// todo, check for overflow?
offset = ts.LabelSymbols[labelIdx]
labelIdx++
length = ts.LabelSymbols[labelIdx]
labelIdx++
value := req.Symbols[offset : offset+length]
m[model.LabelName(name)] = model.LabelValue(value)
}
fmt.Println(m)
for _, s := range ts.Samples { for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)

View file

@ -14,8 +14,9 @@
package prompb package prompb
import ( import (
"slices"
"sync" "sync"
"golang.org/x/exp/slices"
) )
func (m Sample) T() int64 { return m.Timestamp } func (m Sample) T() int64 { return m.Timestamp }

View file

@ -1,9 +1,21 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prompb package prompb
import ( import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require"
) )
func TestOptimizedMarshal(t *testing.T) { func TestOptimizedMarshal(t *testing.T) {
@ -63,15 +75,15 @@ func TestOptimizedMarshal(t *testing.T) {
got = got[:0] got = got[:0]
// should be the same as the standard marshal // should be the same as the standard marshal
expected, err := tt.m.Marshal() expected, err := tt.m.Marshal()
assert.NoError(t, err) require.NoError(t, err)
got, err = tt.m.OptimizedMarshal(got) got, err = tt.m.OptimizedMarshal(got)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, got) require.Equal(t, expected, got)
// round trip // round trip
m := &MinimizedWriteRequest{} m := &MinimizedWriteRequest{}
assert.NoError(t, m.Unmarshal(got)) require.NoError(t, m.Unmarshal(got))
assert.Equal(t, tt.m, m) require.Equal(t, tt.m, m)
}) })
} }
} }

View file

@ -60,7 +60,7 @@ func (x ReadRequest_ResponseType) String() string {
} }
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2, 0} return fileDescriptor_eefc82927d57d89b, []int{3, 0}
} }
type WriteRequest struct { type WriteRequest struct {
@ -175,6 +175,63 @@ func (m *MinimizedWriteRequest) GetSymbols() string {
return "" return ""
} }
// MinimizedWriteRequestLen is an experimental remote-write request format in
// which all label names and values are packed into the single Symbols byte
// blob; each symbol is prefixed with a varint of its length (see the
// corresponding message in remote.proto).
type MinimizedWriteRequestLen struct {
	Timeseries []MinimizedTimeSeriesLen `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
	// The symbols table. All symbols are concatenated strings prepended with a varint of their length.
	// To read the symbols table, it's required to know the offset of the actual symbol to read from this string.
	Symbols              []byte   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
// Generated boilerplate implementing the proto.Message interface for
// MinimizedWriteRequestLen, mirroring the other request types in this file.

func (m *MinimizedWriteRequestLen) Reset()         { *m = MinimizedWriteRequestLen{} }
func (m *MinimizedWriteRequestLen) String() string { return proto.CompactTextString(m) }
func (*MinimizedWriteRequestLen) ProtoMessage()    {}
func (*MinimizedWriteRequestLen) Descriptor() ([]byte, []int) {
	return fileDescriptor_eefc82927d57d89b, []int{2}
}
func (m *MinimizedWriteRequestLen) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *MinimizedWriteRequestLen) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		// Deterministic marshalling is delegated to the reflection-based path.
		return xxx_messageInfo_MinimizedWriteRequestLen.Marshal(b, m, deterministic)
	} else {
		// Fast path: encode back-to-front into the caller's buffer.
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *MinimizedWriteRequestLen) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MinimizedWriteRequestLen.Merge(m, src)
}
func (m *MinimizedWriteRequestLen) XXX_Size() int {
	return m.Size()
}
func (m *MinimizedWriteRequestLen) XXX_DiscardUnknown() {
	xxx_messageInfo_MinimizedWriteRequestLen.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestLen proto.InternalMessageInfo
// GetTimeseries returns the request's time series, or nil for a nil receiver.
func (m *MinimizedWriteRequestLen) GetTimeseries() []MinimizedTimeSeriesLen {
	if m == nil {
		return nil
	}
	return m.Timeseries
}
// GetSymbols returns the packed symbols table, or nil for a nil receiver.
func (m *MinimizedWriteRequestLen) GetSymbols() []byte {
	if m == nil {
		return nil
	}
	return m.Symbols
}
// ReadRequest represents a remote read request. // ReadRequest represents a remote read request.
type ReadRequest struct { type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
@ -193,7 +250,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) } func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { func (*ReadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2} return fileDescriptor_eefc82927d57d89b, []int{3}
} }
func (m *ReadRequest) XXX_Unmarshal(b []byte) error { func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -249,7 +306,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) } func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {} func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { func (*ReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3} return fileDescriptor_eefc82927d57d89b, []int{4}
} }
func (m *ReadResponse) XXX_Unmarshal(b []byte) error { func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -299,7 +356,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) } func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {} func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) { func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{4} return fileDescriptor_eefc82927d57d89b, []int{5}
} }
func (m *Query) XXX_Unmarshal(b []byte) error { func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -368,7 +425,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) } func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {} func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) { func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5} return fileDescriptor_eefc82927d57d89b, []int{6}
} }
func (m *QueryResult) XXX_Unmarshal(b []byte) error { func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -421,7 +478,7 @@ func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {} func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{6} return fileDescriptor_eefc82927d57d89b, []int{7}
} }
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -468,6 +525,7 @@ func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest") proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest")
proto.RegisterType((*MinimizedWriteRequestLen)(nil), "prometheus.MinimizedWriteRequestLen")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query") proto.RegisterType((*Query)(nil), "prometheus.Query")
@ -478,41 +536,43 @@ func init() {
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{ var fileDescriptor_eefc82927d57d89b = []byte{
// 543 bytes of a gzipped FileDescriptorProto // 568 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4b, 0x6f, 0xd3, 0x40,
0x10, 0xae, 0xeb, 0xb4, 0x09, 0xe3, 0x10, 0x99, 0x6d, 0x43, 0x4c, 0x0e, 0x49, 0x64, 0x71, 0x88, 0x10, 0xee, 0xd6, 0x69, 0x13, 0xc6, 0xa1, 0x32, 0xdb, 0x96, 0x9a, 0x1e, 0x9a, 0xc8, 0xe2, 0x10,
0x54, 0x14, 0x44, 0xa8, 0x38, 0xf5, 0x40, 0x5a, 0x22, 0x95, 0x52, 0xf3, 0xb3, 0x09, 0x02, 0x21, 0xa9, 0x28, 0x88, 0x50, 0x71, 0xea, 0x81, 0xb4, 0x44, 0x0a, 0x25, 0xe6, 0xb1, 0x09, 0x02, 0x21,
0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0x53, 0xef, 0x5a, 0x6a, 0x38, 0xf3, 0x00, 0x3c, 0x13, 0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0xa8, 0x77, 0x2d, 0x35, 0x9c, 0x39, 0x71, 0xe2, 0x37,
0xa7, 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x0d, 0x20, 0xc4, 0xcd, 0xfb, 0x71, 0xea, 0x09, 0xf1, 0x0b, 0x10, 0xca, 0x2f, 0x41, 0x7e, 0x85, 0x0d, 0x44, 0x94, 0xdb, 0xee,
0xfd, 0xcd, 0xec, 0xec, 0x18, 0xea, 0x09, 0x06, 0x11, 0xc7, 0x41, 0x9c, 0x44, 0x3c, 0x22, 0x10, 0x7c, 0x8f, 0xfd, 0x76, 0x76, 0x6c, 0xa8, 0xc7, 0xe8, 0x87, 0x02, 0xdb, 0x51, 0x1c, 0x8a, 0x90,
0x27, 0x51, 0x80, 0x7c, 0x8e, 0x29, 0x6b, 0x6b, 0x7c, 0x19, 0x23, 0xcb, 0x89, 0xf6, 0xfe, 0x45, 0x42, 0x14, 0x87, 0x3e, 0x8a, 0x09, 0x26, 0x7c, 0x5f, 0x15, 0xb3, 0x08, 0x79, 0x0e, 0xec, 0xef,
0x74, 0x11, 0x89, 0xcf, 0xfb, 0xd9, 0x57, 0x8e, 0x9a, 0x9f, 0x15, 0xa8, 0xbf, 0x49, 0x7c, 0x8e, 0x9c, 0x87, 0xe7, 0x61, 0xb6, 0xbc, 0x9f, 0xae, 0xf2, 0xaa, 0xf1, 0x85, 0x40, 0xfd, 0x4d, 0xec,
0x14, 0x2f, 0x53, 0x64, 0x9c, 0x1c, 0x01, 0x70, 0x3f, 0x40, 0x86, 0x89, 0x8f, 0xcc, 0x50, 0x7a, 0x09, 0x64, 0x78, 0x91, 0x20, 0x17, 0xf4, 0x18, 0x40, 0x78, 0x3e, 0x72, 0x8c, 0x3d, 0xe4, 0x3a,
0x6a, 0x5f, 0x1b, 0xde, 0x1e, 0xfc, 0x0a, 0x1d, 0x4c, 0xfd, 0x00, 0x27, 0x82, 0x3d, 0xae, 0x5c, 0x69, 0x2a, 0x2d, 0xb5, 0x73, 0xbb, 0xfd, 0xdb, 0xb4, 0x3d, 0xf2, 0x7c, 0x1c, 0x66, 0xe8, 0x49,
0x7f, 0xef, 0x6e, 0x51, 0x49, 0x4f, 0x8e, 0xa0, 0x16, 0x20, 0x77, 0x3c, 0x87, 0x3b, 0x86, 0x2a, 0xe5, 0xea, 0x47, 0x63, 0x8d, 0x49, 0x7c, 0x7a, 0x0c, 0x35, 0x1f, 0x85, 0xed, 0xda, 0xc2, 0xd6,
0xbc, 0x6d, 0xd9, 0x6b, 0x21, 0x4f, 0x7c, 0xd7, 0x2a, 0x14, 0x85, 0x7f, 0xed, 0x38, 0xab, 0xd4, 0x95, 0x4c, 0xbb, 0x2f, 0x6b, 0x4d, 0x14, 0xb1, 0xe7, 0x98, 0x05, 0xa3, 0xd0, 0x2f, 0x14, 0x67,
0xb6, 0x75, 0xd5, 0xfc, 0xa4, 0x40, 0xd3, 0xf2, 0x43, 0x3f, 0xf0, 0x3f, 0xa2, 0xb7, 0xd1, 0xdb, 0x95, 0xda, 0xba, 0xa6, 0x18, 0x9f, 0x08, 0xec, 0x9a, 0x5e, 0xe0, 0xf9, 0xde, 0x47, 0x74, 0x97,
0xf8, 0x2f, 0xbd, 0x75, 0x37, 0xf2, 0x4b, 0xdb, 0x3f, 0x9b, 0x34, 0xa0, 0xca, 0x96, 0xc1, 0x2c, 0xb2, 0xf5, 0x56, 0x64, 0x6b, 0x2c, 0xf9, 0x97, 0xb2, 0x7f, 0x86, 0xd4, 0xa1, 0xca, 0x67, 0xfe,
0x5a, 0x30, 0xa3, 0xd2, 0x53, 0xfa, 0x37, 0x68, 0x79, 0xcc, 0x1b, 0x38, 0xab, 0xd4, 0x54, 0xbd, 0x38, 0x9c, 0x72, 0xbd, 0xd2, 0x24, 0xad, 0x1b, 0xac, 0xdc, 0xe6, 0x01, 0xce, 0x2a, 0x35, 0x45,
0x62, 0x7e, 0x55, 0x40, 0xa3, 0xe8, 0x78, 0x65, 0xf1, 0x03, 0xa8, 0x5e, 0xa6, 0x72, 0xe5, 0x5b, 0xab, 0x18, 0x9f, 0x09, 0xe8, 0x2b, 0x63, 0x0c, 0x30, 0xa0, 0xfd, 0x15, 0x49, 0x8c, 0x6b, 0x92,
0x72, 0xe5, 0x57, 0x29, 0x26, 0x4b, 0x5a, 0x2a, 0xc8, 0x7b, 0x68, 0x39, 0xae, 0x8b, 0x31, 0x47, 0x0c, 0x30, 0xb8, 0x3e, 0x4c, 0x7d, 0x75, 0x98, 0x6f, 0x04, 0x54, 0x86, 0xb6, 0x5b, 0x76, 0xe2,
0xcf, 0x4e, 0x90, 0xc5, 0x51, 0xc8, 0xd0, 0x16, 0xaf, 0x61, 0x6c, 0xf7, 0xd4, 0x7e, 0x63, 0x78, 0x10, 0xaa, 0x17, 0x89, 0x7c, 0xf8, 0x2d, 0xf9, 0xf0, 0x57, 0x09, 0xc6, 0x33, 0x56, 0x32, 0xe8,
0x57, 0x36, 0x4b, 0x65, 0x06, 0xb4, 0x50, 0x4f, 0x97, 0x31, 0xd2, 0x66, 0x19, 0x22, 0xa3, 0xcc, 0x7b, 0xd8, 0xb3, 0x1d, 0x07, 0x23, 0x81, 0xae, 0x15, 0x23, 0x8f, 0xc2, 0x80, 0xa3, 0x95, 0x8d,
0x3c, 0x84, 0xba, 0x0c, 0x10, 0x0d, 0xaa, 0x93, 0x91, 0xf5, 0xf2, 0x7c, 0x3c, 0xd1, 0xb7, 0x48, 0x86, 0xbe, 0xde, 0x54, 0x5a, 0x5b, 0x9d, 0xbb, 0xb2, 0x58, 0x3a, 0xa6, 0xcd, 0x0a, 0xf6, 0x68,
0x0b, 0xf6, 0x26, 0x53, 0x3a, 0x1e, 0x59, 0xe3, 0x27, 0xf6, 0xdb, 0x17, 0xd4, 0x3e, 0x39, 0x7d, 0x16, 0x21, 0xdb, 0x2d, 0x4d, 0xe4, 0x2a, 0x37, 0x8e, 0xa0, 0x2e, 0x17, 0xa8, 0x0a, 0xd5, 0x61,
0xfd, 0xfc, 0xd9, 0x44, 0x57, 0xcc, 0x51, 0xe6, 0x72, 0xd6, 0x51, 0xe4, 0x01, 0x54, 0x13, 0x64, 0xd7, 0x7c, 0x39, 0xe8, 0x0d, 0xb5, 0x35, 0xba, 0x07, 0xdb, 0xc3, 0x11, 0xeb, 0x75, 0xcd, 0xde,
0xe9, 0x82, 0x97, 0x17, 0x6a, 0xfd, 0x79, 0x21, 0xc1, 0xd3, 0x52, 0x67, 0x7e, 0x51, 0x60, 0x47, 0x13, 0xeb, 0xed, 0x0b, 0x66, 0x9d, 0xf6, 0x5f, 0x3f, 0x7f, 0x36, 0xd4, 0x88, 0xd1, 0x4d, 0x55,
0x10, 0xe4, 0x1e, 0x10, 0xc6, 0x9d, 0x84, 0xdb, 0x62, 0xae, 0xdc, 0x09, 0x62, 0x3b, 0xc8, 0x72, 0xf6, 0xc2, 0x8a, 0x3e, 0x80, 0x6a, 0x8c, 0x3c, 0x99, 0x8a, 0xf2, 0x42, 0x7b, 0x7f, 0x5f, 0x28,
0x94, 0xbe, 0x4a, 0x75, 0xc1, 0x4c, 0x4b, 0xc2, 0x62, 0xa4, 0x0f, 0x3a, 0x86, 0xde, 0xa6, 0x76, 0xc3, 0x59, 0xc9, 0x33, 0xbe, 0x12, 0xd8, 0xc8, 0x00, 0x7a, 0x0f, 0x28, 0x17, 0x76, 0x2c, 0xac,
0x5b, 0x68, 0x1b, 0x18, 0x7a, 0xb2, 0xf2, 0x10, 0x6a, 0x81, 0xc3, 0xdd, 0x39, 0x26, 0xac, 0x58, 0xac, 0xaf, 0xc2, 0xf6, 0x23, 0xcb, 0x4f, 0x7d, 0x48, 0x4b, 0x61, 0x5a, 0x86, 0x8c, 0x4a, 0xc0,
0x20, 0x43, 0xee, 0xea, 0xdc, 0x99, 0xe1, 0xc2, 0xca, 0x05, 0x74, 0xad, 0x24, 0x07, 0xb0, 0x33, 0xe4, 0xb4, 0x05, 0x1a, 0x06, 0xee, 0x32, 0x77, 0x3d, 0xe3, 0x6e, 0x61, 0xe0, 0xca, 0xcc, 0x23,
0xf7, 0x43, 0x9e, 0xbf, 0xa7, 0x36, 0x6c, 0xfe, 0x3e, 0xdc, 0xd3, 0x8c, 0xa4, 0xb9, 0xc6, 0x1c, 0xa8, 0xf9, 0xb6, 0x70, 0x26, 0x18, 0xf3, 0x62, 0x9a, 0x75, 0x39, 0xd5, 0xc0, 0x1e, 0xe3, 0xd4,
0x83, 0x26, 0x5d, 0x8e, 0x3c, 0xfa, 0xff, 0x85, 0x97, 0xb7, 0xc8, 0xbc, 0x82, 0xbd, 0x93, 0x79, 0xcc, 0x09, 0x6c, 0xc1, 0xa4, 0x87, 0xb0, 0x31, 0xf1, 0x02, 0x91, 0xbf, 0xa7, 0xda, 0xd9, 0xfd,
0x1a, 0x7e, 0xc8, 0x1e, 0x47, 0x9a, 0xea, 0x63, 0x68, 0xb8, 0x39, 0x6c, 0x6f, 0x44, 0xde, 0x91, 0xb3, 0xb9, 0xfd, 0x14, 0x64, 0x39, 0xc7, 0xe8, 0x81, 0x2a, 0x5d, 0x8e, 0x3e, 0xfa, 0xff, 0xaf,
0x23, 0x0b, 0x63, 0x91, 0x7a, 0xd3, 0x95, 0x8f, 0xa4, 0x0b, 0x5a, 0xb6, 0x46, 0x4b, 0xdb, 0x0f, 0x4f, 0x9e, 0x22, 0xe3, 0x12, 0xb6, 0x4f, 0x27, 0x49, 0xf0, 0x21, 0x7d, 0x1c, 0xa9, 0xab, 0x8f,
0x3d, 0xbc, 0x2a, 0xe6, 0x04, 0x02, 0x7a, 0x9a, 0x21, 0xc7, 0xfb, 0xd7, 0xab, 0x8e, 0xf2, 0x6d, 0x61, 0xcb, 0xc9, 0xcb, 0xd6, 0x92, 0xe5, 0x1d, 0xd9, 0xb2, 0x10, 0x16, 0xae, 0x37, 0x1d, 0x79,
0xd5, 0x51, 0x7e, 0xac, 0x3a, 0xca, 0xbb, 0xdd, 0x2c, 0x37, 0x9e, 0xcd, 0x76, 0xc5, 0x0f, 0xfd, 0x4b, 0x1b, 0xa0, 0xa6, 0x63, 0x34, 0xb3, 0xbc, 0xc0, 0xc5, 0xcb, 0xa2, 0x4f, 0x90, 0x95, 0x9e,
0xf0, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x3e, 0xdc, 0x81, 0x0f, 0x04, 0x00, 0x00, 0xa6, 0x95, 0x93, 0x9d, 0xab, 0xf9, 0x01, 0xf9, 0x3e, 0x3f, 0x20, 0x3f, 0xe7, 0x07, 0xe4, 0xdd,
0x66, 0xea, 0x1b, 0x8d, 0xc7, 0x9b, 0xd9, 0xdf, 0xe5, 0xe1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff,
0xf1, 0x65, 0x72, 0x0c, 0x9c, 0x04, 0x00, 0x00,
} }
func (m *WriteRequest) Marshal() (dAtA []byte, err error) { func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@ -618,6 +678,54 @@ func (m *MinimizedWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
// Marshal encodes the message into a freshly allocated, exactly-sized buffer.
func (m *MinimizedWriteRequestLen) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes the message into dAtA and returns the number of bytes written.
func (m *MinimizedWriteRequestLen) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes the message into the tail of dAtA, writing
// fields back-to-front (highest field number first) as gogoproto fast-path
// marshallers do, and returns the number of bytes written.
func (m *MinimizedWriteRequestLen) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Unknown fields are preserved verbatim at the end of the encoding.
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Symbols) > 0 {
		i -= len(m.Symbols)
		copy(dAtA[i:], m.Symbols)
		i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2 (length-delimited)
	}
	if len(m.Timeseries) > 0 {
		// Iterate in reverse so the series end up in order when read front-to-back.
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintRemote(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
func (m *ReadRequest) Marshal() (dAtA []byte, err error) { func (m *ReadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
@ -925,6 +1033,28 @@ func (m *MinimizedWriteRequest) Size() (n int) {
return n return n
} }
// Size returns the encoded size of the message in bytes; it must stay in
// lockstep with what MarshalToSizedBuffer produces.
func (m *MinimizedWriteRequestLen) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			// One byte of tag plus the varint length prefix plus the payload.
			n += 1 + l + sovRemote(uint64(l))
		}
	}
	l = len(m.Symbols)
	if l > 0 {
		n += 1 + l + sovRemote(uint64(l))
	}
	// Unknown fields are carried through unchanged.
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
func (m *ReadRequest) Size() (n int) { func (m *ReadRequest) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
@ -1277,6 +1407,125 @@ func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
// Unmarshal decodes dAtA into m, appending to any existing Timeseries and
// replacing Symbols. Unknown fields are retained in XXX_unrecognized.
func (m *MinimizedWriteRequestLen) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field's tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowRemote
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		// Wire type 4 (end group) is invalid outside a group.
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedWriteRequestLen: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedWriteRequestLen: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated MinimizedTimeSeriesLen timeseries.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesLen{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: bytes symbols (the packed symbols table).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Copy into the existing backing array when possible.
			m.Symbols = append(m.Symbols[:0], dAtA[iNdEx:postIndex]...)
			if m.Symbols == nil {
				m.Symbols = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes.
			iNdEx = preIndex
			skippy, err := skipRemote(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthRemote
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *ReadRequest) Unmarshal(dAtA []byte) error { func (m *ReadRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

View file

@ -27,7 +27,6 @@ message WriteRequest {
repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
} }
message MinimizedWriteRequest { message MinimizedWriteRequest {
repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false]; repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
// Cortex uses this field to determine the source of the write request. // Cortex uses this field to determine the source of the write request.
@ -40,6 +39,18 @@ message MinimizedWriteRequest {
string symbols = 4; string symbols = 4;
} }
// MinimizedWriteRequestLen is an experimental write-request format whose
// label names/values are packed into a single `symbols` blob, each entry
// prefixed with a varint of its length.
message MinimizedWriteRequestLen {
  repeated MinimizedTimeSeriesLen timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings prepended with a varint of their length.
  // To read the symbols table, it's required to know the offset of the actual symbol to read from this string.
  bytes symbols = 4;
}
// ReadRequest represents a remote read request. // ReadRequest represents a remote read request.
message ReadRequest { message ReadRequest {
repeated Query queries = 1; repeated Query queries = 1;

View file

@ -127,7 +127,7 @@ func (x LabelMatcher_Type) String() string {
} }
func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{9, 0} return fileDescriptor_d938547f84707355, []int{10, 0}
} }
// We require this to match chunkenc.Encoding. // We require this to match chunkenc.Encoding.
@ -159,7 +159,7 @@ func (x Chunk_Encoding) String() string {
} }
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{11, 0} return fileDescriptor_d938547f84707355, []int{12, 0}
} }
type MetricMetadata struct { type MetricMetadata struct {
@ -736,6 +736,7 @@ func (m *TimeSeries) GetHistograms() []Histogram {
type MinimizedTimeSeries struct { type MinimizedTimeSeries struct {
// Sorted list of label name-value pair references. This list's len is always multiple of 4, // Sorted list of label name-value pair references. This list's len is always multiple of 4,
// packing tuples of (label name offset, label name length, label value offset, label value length). // packing tuples of (label name offset, label name length, label value offset, label value length).
// Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
LabelSymbols []uint32 `protobuf:"varint,1,rep,packed,name=label_symbols,json=labelSymbols,proto3" json:"label_symbols,omitempty"` LabelSymbols []uint32 `protobuf:"varint,1,rep,packed,name=label_symbols,json=labelSymbols,proto3" json:"label_symbols,omitempty"`
// Sorted by time, oldest sample first. // Sorted by time, oldest sample first.
// TODO: support references for other types // TODO: support references for other types
@ -808,6 +809,82 @@ func (m *MinimizedTimeSeries) GetHistograms() []Histogram {
return nil return nil
} }
// MinimizedTimeSeriesLen is the generated Go type for the experimental
// remote-write "len"-format series: labels are not inlined as strings but
// referenced via fixed32 offsets into the symbols table carried by the
// enclosing MinimizedWriteRequestLen message.
type MinimizedTimeSeriesLen struct {
	// Sorted list of label name-value pair references, encoded as 32bit uint. This
	// list's real len is always multiple of 2, label name offset/label value offset.
	// Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
	LabelSymbols []uint32 `protobuf:"fixed32,1,rep,packed,name=label_symbols,json=labelSymbols,proto3" json:"label_symbols,omitempty"`
	// Sorted by time, oldest sample first.
	// TODO: support references for other types
	Samples    []Sample    `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
	Exemplars  []Exemplar  `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
	Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"`
	// XXX_* fields are protobuf runtime bookkeeping (unknown-field passthrough
	// and cached encoded size); they are managed by the generated code below.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
// The methods below are standard gogo/protobuf generated boilerplate that
// wires MinimizedTimeSeriesLen into the proto runtime (registration,
// reflection descriptor lookup, and the XXX_* marshaling hooks).

// Reset restores the message to its empty state.
func (m *MinimizedTimeSeriesLen) Reset() { *m = MinimizedTimeSeriesLen{} }

// String renders the message in the compact proto text format.
func (m *MinimizedTimeSeriesLen) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedTimeSeriesLen) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor and this message's index
// path within it (message 7 of types.proto).
func (*MinimizedTimeSeriesLen) Descriptor() ([]byte, []int) {
	return fileDescriptor_d938547f84707355, []int{7}
}

// XXX_Unmarshal delegates to the hand-rolled fast-path Unmarshal below.
func (m *MinimizedTimeSeriesLen) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaler when determinism is
// requested, otherwise the generated fast path writing into b's capacity.
func (m *MinimizedTimeSeriesLen) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MinimizedTimeSeriesLen.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

// XXX_Merge merges src into m via the proto runtime.
func (m *MinimizedTimeSeriesLen) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MinimizedTimeSeriesLen.Merge(m, src)
}

// XXX_Size reports the encoded size via the generated Size method.
func (m *MinimizedTimeSeriesLen) XXX_Size() int {
	return m.Size()
}

// XXX_DiscardUnknown drops any retained unknown fields.
func (m *MinimizedTimeSeriesLen) XXX_DiscardUnknown() {
	xxx_messageInfo_MinimizedTimeSeriesLen.DiscardUnknown(m)
}

// xxx_messageInfo_MinimizedTimeSeriesLen caches reflection metadata for the
// slow-path Marshal/Merge/DiscardUnknown helpers above.
var xxx_messageInfo_MinimizedTimeSeriesLen proto.InternalMessageInfo
// GetLabelSymbols returns the packed label symbol references; nil-safe.
func (m *MinimizedTimeSeriesLen) GetLabelSymbols() []uint32 {
	if m == nil {
		return nil
	}
	return m.LabelSymbols
}

// GetSamples returns the series' samples; nil-safe.
func (m *MinimizedTimeSeriesLen) GetSamples() []Sample {
	if m == nil {
		return nil
	}
	return m.Samples
}

// GetExemplars returns the series' exemplars; nil-safe.
func (m *MinimizedTimeSeriesLen) GetExemplars() []Exemplar {
	if m == nil {
		return nil
	}
	return m.Exemplars
}

// GetHistograms returns the series' histograms; nil-safe.
func (m *MinimizedTimeSeriesLen) GetHistograms() []Histogram {
	if m == nil {
		return nil
	}
	return m.Histograms
}
type Label struct { type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@ -820,7 +897,7 @@ func (m *Label) Reset() { *m = Label{} }
func (m *Label) String() string { return proto.CompactTextString(m) } func (m *Label) String() string { return proto.CompactTextString(m) }
func (*Label) ProtoMessage() {} func (*Label) ProtoMessage() {}
func (*Label) Descriptor() ([]byte, []int) { func (*Label) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{7} return fileDescriptor_d938547f84707355, []int{8}
} }
func (m *Label) XXX_Unmarshal(b []byte) error { func (m *Label) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -874,7 +951,7 @@ func (m *Labels) Reset() { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) } func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage() {} func (*Labels) ProtoMessage() {}
func (*Labels) Descriptor() ([]byte, []int) { func (*Labels) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{8} return fileDescriptor_d938547f84707355, []int{9}
} }
func (m *Labels) XXX_Unmarshal(b []byte) error { func (m *Labels) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -924,7 +1001,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { func (*LabelMatcher) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{9} return fileDescriptor_d938547f84707355, []int{10}
} }
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -991,7 +1068,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} }
func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (m *ReadHints) String() string { return proto.CompactTextString(m) }
func (*ReadHints) ProtoMessage() {} func (*ReadHints) ProtoMessage() {}
func (*ReadHints) Descriptor() ([]byte, []int) { func (*ReadHints) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{10} return fileDescriptor_d938547f84707355, []int{11}
} }
func (m *ReadHints) XXX_Unmarshal(b []byte) error { func (m *ReadHints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -1085,7 +1162,7 @@ func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) } func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {} func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) { func (*Chunk) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{11} return fileDescriptor_d938547f84707355, []int{12}
} }
func (m *Chunk) XXX_Unmarshal(b []byte) error { func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -1157,7 +1234,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) { func (*ChunkedSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{12} return fileDescriptor_d938547f84707355, []int{13}
} }
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -1212,6 +1289,7 @@ func init() {
proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan")
proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries")
proto.RegisterType((*MinimizedTimeSeries)(nil), "prometheus.MinimizedTimeSeries") proto.RegisterType((*MinimizedTimeSeries)(nil), "prometheus.MinimizedTimeSeries")
proto.RegisterType((*MinimizedTimeSeriesLen)(nil), "prometheus.MinimizedTimeSeriesLen")
proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Label)(nil), "prometheus.Label")
proto.RegisterType((*Labels)(nil), "prometheus.Labels") proto.RegisterType((*Labels)(nil), "prometheus.Labels")
proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher") proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher")
@ -1223,78 +1301,79 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{ var fileDescriptor_d938547f84707355 = []byte{
// 1129 bytes of a gzipped FileDescriptorProto // 1150 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcb, 0x6e, 0xdb, 0x46,
0x14, 0x5e, 0xdb, 0x89, 0x13, 0x9f, 0xfc, 0xd4, 0x3b, 0xdd, 0x16, 0x53, 0xd1, 0x6d, 0xb0, 0x54, 0x17, 0x36, 0x49, 0x89, 0x12, 0x8f, 0x2e, 0xa1, 0x27, 0x8e, 0x7f, 0xfe, 0x46, 0xe3, 0xa8, 0x2c,
0x08, 0x08, 0x65, 0xd5, 0x85, 0x0b, 0x2a, 0x2a, 0xa4, 0xdd, 0x6d, 0xf6, 0x47, 0xac, 0x13, 0x75, 0xd2, 0xaa, 0x45, 0x21, 0x23, 0x6e, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x82, 0x9a, 0x12,
0x92, 0x15, 0x94, 0x1b, 0x6b, 0x92, 0xcc, 0x26, 0x56, 0xfd, 0x87, 0x67, 0x52, 0x6d, 0xfa, 0x1e, 0x32, 0x92, 0xd1, 0xa6, 0x1b, 0x61, 0x24, 0x8d, 0x25, 0x22, 0xbc, 0x95, 0x33, 0x0a, 0xac, 0xbc,
0xdc, 0xf1, 0x12, 0xbc, 0x45, 0x25, 0x6e, 0xe0, 0x05, 0x10, 0xda, 0x2b, 0x24, 0x5e, 0x02, 0xcd, 0x47, 0x77, 0x7d, 0x89, 0xbe, 0x45, 0x80, 0x6e, 0xda, 0x17, 0x28, 0x0a, 0xaf, 0x02, 0xf4, 0x25,
0xd8, 0x8e, 0x9d, 0x6e, 0x91, 0x28, 0x77, 0xdc, 0xcd, 0xf9, 0xce, 0xdf, 0x37, 0x73, 0xe6, 0xcc, 0x8a, 0x19, 0x92, 0x22, 0x15, 0xbb, 0x40, 0xd3, 0x5d, 0x76, 0x73, 0xbe, 0x73, 0xfb, 0xce, 0xcc,
0x19, 0x68, 0xf0, 0x55, 0x4c, 0x59, 0x2f, 0x4e, 0x22, 0x1e, 0x21, 0x88, 0x93, 0x28, 0xa0, 0x7c, 0x99, 0x39, 0x03, 0x35, 0xbe, 0x8c, 0x28, 0xeb, 0x44, 0x71, 0xc8, 0x43, 0x04, 0x51, 0x1c, 0xfa,
0x41, 0x97, 0xec, 0xde, 0xce, 0x3c, 0x9a, 0x47, 0x12, 0xde, 0x13, 0xab, 0xd4, 0xc2, 0xfe, 0x49, 0x94, 0xcf, 0xe9, 0x82, 0xed, 0x6c, 0xcd, 0xc2, 0x59, 0x28, 0xe1, 0x3d, 0xb1, 0x4a, 0x2c, 0xec,
0x85, 0xb6, 0x43, 0x79, 0xe2, 0x4d, 0x1d, 0xca, 0xc9, 0x8c, 0x70, 0x82, 0x1e, 0x43, 0x45, 0xc4, 0x9f, 0x55, 0x68, 0x3a, 0x94, 0xc7, 0xee, 0xc4, 0xa1, 0x9c, 0x4c, 0x09, 0x27, 0xe8, 0x31, 0x94,
0xb0, 0x94, 0x8e, 0xd2, 0x6d, 0xef, 0x3f, 0xec, 0x15, 0x31, 0x7a, 0x9b, 0x96, 0x99, 0x38, 0x5e, 0x44, 0x0c, 0x4b, 0x69, 0x29, 0xed, 0xe6, 0xfe, 0xc3, 0x4e, 0x1e, 0xa3, 0xb3, 0x6e, 0x99, 0x8a,
0xc5, 0x14, 0x4b, 0x17, 0xf4, 0x19, 0xa0, 0x40, 0x62, 0xee, 0x25, 0x09, 0x3c, 0x7f, 0xe5, 0x86, 0xc3, 0x65, 0x44, 0xb1, 0x74, 0x41, 0x9f, 0x03, 0xf2, 0x25, 0x36, 0xba, 0x24, 0xbe, 0xeb, 0x2d,
0x24, 0xa0, 0x96, 0xda, 0x51, 0xba, 0x06, 0x36, 0x53, 0xcd, 0xb1, 0x54, 0x0c, 0x48, 0x40, 0x11, 0x47, 0x01, 0xf1, 0xa9, 0xa5, 0xb6, 0x94, 0xb6, 0x81, 0xcd, 0x44, 0x73, 0x2c, 0x15, 0x3d, 0xe2,
0x82, 0xca, 0x82, 0xfa, 0xb1, 0x55, 0x91, 0x7a, 0xb9, 0x16, 0xd8, 0x32, 0xf4, 0xb8, 0x55, 0x4d, 0x53, 0x84, 0xa0, 0x34, 0xa7, 0x5e, 0x64, 0x95, 0xa4, 0x5e, 0xae, 0x05, 0xb6, 0x08, 0x5c, 0x6e,
0x31, 0xb1, 0xb6, 0x57, 0x00, 0x45, 0x26, 0xd4, 0x80, 0xda, 0xc5, 0xe0, 0x9b, 0xc1, 0xf0, 0xdb, 0x95, 0x13, 0x4c, 0xac, 0xed, 0x25, 0x40, 0x9e, 0x09, 0xd5, 0xa0, 0x72, 0xd1, 0xfb, 0xb6, 0xd7,
0x81, 0xb9, 0x25, 0x84, 0xa3, 0xe1, 0xc5, 0x60, 0xdc, 0xc7, 0xa6, 0x82, 0x0c, 0xa8, 0x9e, 0x1c, 0xff, 0xae, 0x67, 0x6e, 0x08, 0xe1, 0xa8, 0x7f, 0xd1, 0x1b, 0x76, 0xb1, 0xa9, 0x20, 0x03, 0xca,
0x5c, 0x9c, 0xf4, 0x4d, 0x15, 0xb5, 0xc0, 0x38, 0x3d, 0x1b, 0x8d, 0x87, 0x27, 0xf8, 0xc0, 0x31, 0x27, 0x07, 0x17, 0x27, 0x5d, 0x53, 0x45, 0x0d, 0x30, 0x4e, 0xcf, 0x06, 0xc3, 0xfe, 0x09, 0x3e,
0x35, 0x84, 0xa0, 0x2d, 0x35, 0x05, 0x56, 0x11, 0xae, 0xa3, 0x0b, 0xc7, 0x39, 0xc0, 0xcf, 0xcd, 0x70, 0x4c, 0x0d, 0x21, 0x68, 0x4a, 0x4d, 0x8e, 0x95, 0x84, 0xeb, 0xe0, 0xc2, 0x71, 0x0e, 0xf0,
0x2a, 0xaa, 0x43, 0xe5, 0x6c, 0x70, 0x3c, 0x34, 0x75, 0xd4, 0x84, 0xfa, 0x68, 0x7c, 0x30, 0xee, 0x73, 0xb3, 0x8c, 0xaa, 0x50, 0x3a, 0xeb, 0x1d, 0xf7, 0x4d, 0x1d, 0xd5, 0xa1, 0x3a, 0x18, 0x1e,
0x8f, 0xfa, 0x63, 0xb3, 0x66, 0x3f, 0x01, 0x7d, 0x44, 0x82, 0xd8, 0xa7, 0x68, 0x07, 0xaa, 0x2f, 0x0c, 0xbb, 0x83, 0xee, 0xd0, 0xac, 0xd8, 0x4f, 0x40, 0x1f, 0x10, 0x3f, 0xf2, 0x28, 0xda, 0x82,
0x89, 0xbf, 0x4c, 0x8f, 0x45, 0xc1, 0xa9, 0x80, 0x3e, 0x00, 0x83, 0x7b, 0x01, 0x65, 0x9c, 0x04, 0xf2, 0x4b, 0xe2, 0x2d, 0x92, 0x6d, 0x51, 0x70, 0x22, 0xa0, 0x0f, 0xc0, 0xe0, 0xae, 0x4f, 0x19,
0xb1, 0xdc, 0xa7, 0x86, 0x0b, 0xc0, 0x8e, 0xa0, 0xde, 0xbf, 0xa2, 0x41, 0xec, 0x93, 0x04, 0xed, 0x27, 0x7e, 0x24, 0xeb, 0xd4, 0x70, 0x0e, 0xd8, 0x21, 0x54, 0xbb, 0x57, 0xd4, 0x8f, 0x3c, 0x12,
0x81, 0xee, 0x93, 0x09, 0xf5, 0x99, 0xa5, 0x74, 0xb4, 0x6e, 0x63, 0x7f, 0xbb, 0x7c, 0xae, 0xe7, 0xa3, 0x3d, 0xd0, 0x3d, 0x32, 0xa6, 0x1e, 0xb3, 0x94, 0x96, 0xd6, 0xae, 0xed, 0x6f, 0x16, 0xf7,
0x42, 0x73, 0x58, 0x79, 0xfd, 0xfb, 0x83, 0x2d, 0x9c, 0x99, 0x15, 0x09, 0xd5, 0x7f, 0x4c, 0xa8, 0xf5, 0x5c, 0x68, 0x0e, 0x4b, 0xaf, 0xff, 0x78, 0xb0, 0x81, 0x53, 0xb3, 0x3c, 0xa1, 0xfa, 0x8f,
0xbd, 0x99, 0xf0, 0x97, 0x2a, 0x18, 0xa7, 0x1e, 0xe3, 0xd1, 0x3c, 0x21, 0x01, 0xba, 0x0f, 0xc6, 0x09, 0xb5, 0xb7, 0x13, 0xfe, 0x5a, 0x06, 0xe3, 0xd4, 0x65, 0x3c, 0x9c, 0xc5, 0xc4, 0x47, 0xf7,
0x34, 0x5a, 0x86, 0xdc, 0xf5, 0x42, 0x2e, 0x69, 0x57, 0x4e, 0xb7, 0x70, 0x5d, 0x42, 0x67, 0x21, 0xc1, 0x98, 0x84, 0x8b, 0x80, 0x8f, 0xdc, 0x80, 0x4b, 0xda, 0xa5, 0xd3, 0x0d, 0x5c, 0x95, 0xd0,
0x47, 0x1f, 0x42, 0x23, 0x55, 0x5f, 0xfa, 0x11, 0xe1, 0x69, 0x9a, 0xd3, 0x2d, 0x0c, 0x12, 0x3c, 0x59, 0xc0, 0xd1, 0x87, 0x50, 0x4b, 0xd4, 0x97, 0x5e, 0x48, 0x78, 0x92, 0xe6, 0x74, 0x03, 0x83,
0x16, 0x18, 0x32, 0x41, 0x63, 0xcb, 0x40, 0xe6, 0x51, 0xb0, 0x58, 0xa2, 0xbb, 0xa0, 0xb3, 0xe9, 0x04, 0x8f, 0x05, 0x86, 0x4c, 0xd0, 0xd8, 0xc2, 0x97, 0x79, 0x14, 0x2c, 0x96, 0x68, 0x1b, 0x74,
0x82, 0x06, 0x44, 0x56, 0x6d, 0x1b, 0x67, 0x12, 0x7a, 0x08, 0xed, 0x57, 0x34, 0x89, 0x5c, 0xbe, 0x36, 0x99, 0x53, 0x9f, 0xc8, 0x53, 0xdb, 0xc4, 0xa9, 0x84, 0x1e, 0x42, 0xf3, 0x15, 0x8d, 0xc3,
0x48, 0x28, 0x5b, 0x44, 0xfe, 0x4c, 0x56, 0x50, 0xc1, 0x2d, 0x81, 0x8e, 0x73, 0x10, 0x7d, 0x94, 0x11, 0x9f, 0xc7, 0x94, 0xcd, 0x43, 0x6f, 0x2a, 0x4f, 0x50, 0xc1, 0x0d, 0x81, 0x0e, 0x33, 0x10,
0x99, 0x15, 0xbc, 0x74, 0xc9, 0x4b, 0xc1, 0x4d, 0x81, 0x1f, 0xe5, 0xdc, 0x3e, 0x05, 0xb3, 0x64, 0x7d, 0x9c, 0x9a, 0xe5, 0xbc, 0x74, 0xc9, 0x4b, 0xc1, 0x75, 0x81, 0x1f, 0x65, 0xdc, 0x3e, 0x03,
0x97, 0x12, 0xac, 0x49, 0x82, 0x0a, 0x6e, 0xaf, 0x2d, 0x53, 0x92, 0x47, 0xd0, 0x0e, 0xe9, 0x9c, 0xb3, 0x60, 0x97, 0x10, 0xac, 0x48, 0x82, 0x0a, 0x6e, 0xae, 0x2c, 0x13, 0x92, 0x47, 0xd0, 0x0c,
0x70, 0xef, 0x25, 0x75, 0x59, 0x4c, 0x42, 0x66, 0xd5, 0xe5, 0x09, 0xdf, 0x2d, 0x9f, 0xf0, 0xe1, 0xe8, 0x8c, 0x70, 0xf7, 0x25, 0x1d, 0xb1, 0x88, 0x04, 0xcc, 0xaa, 0xca, 0x1d, 0xde, 0x2e, 0xee,
0x72, 0xfa, 0x82, 0xf2, 0x51, 0x4c, 0xc2, 0xec, 0x98, 0x5b, 0xb9, 0x8f, 0xc0, 0x18, 0xfa, 0x18, 0xf0, 0xe1, 0x62, 0xf2, 0x82, 0xf2, 0x41, 0x44, 0x82, 0x74, 0x9b, 0x1b, 0x99, 0x8f, 0xc0, 0x18,
0x6e, 0xad, 0x83, 0xcc, 0xa8, 0xcf, 0x09, 0xb3, 0x8c, 0x8e, 0xd6, 0x45, 0x78, 0x1d, 0xfb, 0xa9, 0xfa, 0x04, 0xee, 0xac, 0x82, 0x4c, 0xa9, 0xc7, 0x09, 0xb3, 0x8c, 0x96, 0xd6, 0x46, 0x78, 0x15,
0x44, 0x37, 0x0c, 0x25, 0x3b, 0x66, 0x41, 0x47, 0xeb, 0x2a, 0x85, 0xa1, 0xa4, 0xc6, 0x04, 0xad, 0xfb, 0xa9, 0x44, 0xd7, 0x0c, 0x25, 0x3b, 0x66, 0x41, 0x4b, 0x6b, 0x2b, 0xb9, 0xa1, 0xa4, 0xc6,
0x38, 0x62, 0x5e, 0x89, 0x56, 0xe3, 0xdf, 0xd0, 0xca, 0x7d, 0xd6, 0xb4, 0xd6, 0x41, 0x32, 0x5a, 0x04, 0xad, 0x28, 0x64, 0x6e, 0x81, 0x56, 0xed, 0xdf, 0xd0, 0xca, 0x7c, 0x56, 0xb4, 0x56, 0x41,
0xcd, 0x94, 0x56, 0x0e, 0x17, 0xb4, 0xd6, 0x86, 0x19, 0xad, 0x56, 0x4a, 0x2b, 0x87, 0x33, 0x5a, 0x52, 0x5a, 0xf5, 0x84, 0x56, 0x06, 0xe7, 0xb4, 0x56, 0x86, 0x29, 0xad, 0x46, 0x42, 0x2b, 0x83,
0x5f, 0x03, 0x24, 0x94, 0x51, 0xee, 0x2e, 0xc4, 0xe9, 0xb7, 0x65, 0x8f, 0x3f, 0x28, 0x53, 0x5a, 0x53, 0x5a, 0xdf, 0x00, 0xc4, 0x94, 0x51, 0x3e, 0x9a, 0x8b, 0xdd, 0x6f, 0xca, 0x3b, 0xfe, 0xa0,
0xdf, 0x9f, 0x1e, 0x16, 0x76, 0xa7, 0x5e, 0xc8, 0xb1, 0x91, 0xe4, 0xcb, 0xcd, 0x0b, 0x78, 0xeb, 0x48, 0x69, 0xd5, 0x3f, 0x1d, 0x2c, 0xec, 0x4e, 0xdd, 0x80, 0x63, 0x23, 0xce, 0x96, 0xeb, 0x0d,
0xcd, 0x0b, 0xf8, 0x05, 0x18, 0x6b, 0xaf, 0xcd, 0x4e, 0xad, 0x81, 0xf6, 0xbc, 0x3f, 0x32, 0x15, 0x78, 0xe7, 0xed, 0x06, 0xfc, 0x12, 0x8c, 0x95, 0xd7, 0xfa, 0x4d, 0xad, 0x80, 0xf6, 0xbc, 0x3b,
0xa4, 0x83, 0x3a, 0x18, 0x9a, 0x6a, 0xd1, 0xad, 0xda, 0x61, 0x0d, 0xaa, 0x92, 0xf3, 0x61, 0x13, 0x30, 0x15, 0xa4, 0x83, 0xda, 0xeb, 0x9b, 0x6a, 0x7e, 0x5b, 0xb5, 0xc3, 0x0a, 0x94, 0x25, 0xe7,
0xa0, 0x28, 0xbb, 0xfd, 0x04, 0xa0, 0x38, 0x1f, 0x71, 0xf3, 0xa2, 0xcb, 0x4b, 0x46, 0xd3, 0xab, 0xc3, 0x3a, 0x40, 0x7e, 0xec, 0xf6, 0x13, 0x80, 0x7c, 0x7f, 0x44, 0xe7, 0x85, 0x97, 0x97, 0x8c,
0xbc, 0x8d, 0x33, 0x49, 0xe0, 0x3e, 0x0d, 0xe7, 0x7c, 0x21, 0x6f, 0x70, 0x0b, 0x67, 0x92, 0xfd, 0x26, 0xad, 0xbc, 0x89, 0x53, 0x49, 0xe0, 0x1e, 0x0d, 0x66, 0x7c, 0x2e, 0x3b, 0xb8, 0x81, 0x53,
0xa7, 0x02, 0x30, 0xf6, 0x02, 0x3a, 0xa2, 0x89, 0x47, 0xd9, 0xbb, 0xf7, 0xdf, 0x3e, 0xd4, 0x98, 0xc9, 0x7e, 0xa3, 0x00, 0x0c, 0x5d, 0x9f, 0x0e, 0x68, 0xec, 0x52, 0xf6, 0xee, 0xf7, 0x6f, 0x1f,
0x6c, 0x7d, 0x66, 0xa9, 0xd2, 0x03, 0x95, 0x3d, 0xd2, 0x57, 0x21, 0x73, 0xc9, 0x0d, 0xd1, 0x97, 0x2a, 0x4c, 0x5e, 0x7d, 0x66, 0xa9, 0xd2, 0x03, 0x15, 0x3d, 0x92, 0x57, 0x21, 0x75, 0xc9, 0x0c,
0x60, 0xd0, 0xac, 0xe1, 0x99, 0xa5, 0x49, 0xaf, 0x9d, 0xb2, 0x57, 0xfe, 0x1a, 0x64, 0x7e, 0x85, 0xd1, 0x57, 0x60, 0xd0, 0xf4, 0xc2, 0x33, 0x4b, 0x93, 0x5e, 0x5b, 0x45, 0xaf, 0xec, 0x35, 0x48,
0x31, 0xfa, 0x0a, 0x60, 0x91, 0x1f, 0x3c, 0xb3, 0x2a, 0xd2, 0xf5, 0xce, 0x5b, 0xcb, 0x92, 0xf9, 0xfd, 0x72, 0x63, 0xf4, 0x35, 0xc0, 0x3c, 0xdb, 0x78, 0x66, 0x95, 0xa4, 0xeb, 0xbd, 0x5b, 0x8f,
0x96, 0xcc, 0xed, 0xbf, 0x14, 0xb8, 0xed, 0x78, 0xa1, 0x17, 0x78, 0xaf, 0xe8, 0xac, 0xb4, 0xe7, 0x25, 0xf5, 0x2d, 0x98, 0xdb, 0x7f, 0x29, 0x70, 0xd7, 0x71, 0x03, 0xd7, 0x77, 0x5f, 0xd1, 0x69,
0x4f, 0xa0, 0x25, 0x37, 0xe3, 0xb2, 0x55, 0x30, 0x89, 0xb2, 0xad, 0xb7, 0xb2, 0x00, 0x4d, 0xa9, 0xa1, 0xe6, 0x4f, 0xa1, 0x21, 0x8b, 0x19, 0xb1, 0xa5, 0x3f, 0x0e, 0xd3, 0xd2, 0x1b, 0x69, 0x80,
0x1a, 0xa5, 0x9a, 0xff, 0xd3, 0x6e, 0x1f, 0x41, 0x55, 0xd6, 0x4b, 0xcc, 0x0a, 0x39, 0x5f, 0x94, 0xba, 0x54, 0x0d, 0x12, 0xcd, 0xfb, 0x54, 0xed, 0x1b, 0x05, 0xb6, 0x6f, 0xa9, 0xf6, 0x9c, 0x06,
0x74, 0x56, 0x88, 0xf5, 0xe6, 0xab, 0x69, 0x64, 0xaf, 0xa6, 0xfd, 0x18, 0xf4, 0xf3, 0xb4, 0xaa, 0xe8, 0xa3, 0xdb, 0x0a, 0xae, 0xbc, 0xbf, 0xa5, 0x3e, 0x82, 0xb2, 0x6c, 0x4d, 0x31, 0x16, 0xe5,
0xef, 0x7a, 0x0d, 0xec, 0x1f, 0x15, 0x68, 0x4a, 0xdc, 0x21, 0x7c, 0xba, 0xa0, 0x09, 0x7a, 0xb4, 0x28, 0x55, 0x92, 0xb1, 0x28, 0xd6, 0xeb, 0x03, 0xc2, 0x48, 0x07, 0x84, 0xfd, 0x18, 0xf4, 0xf3,
0x31, 0x1e, 0xef, 0xdf, 0xf0, 0xcf, 0xec, 0x7a, 0xa5, 0xb1, 0x98, 0x13, 0x55, 0xdf, 0x46, 0x54, 0xa4, 0x81, 0xdf, 0xb5, 0xe3, 0xed, 0x9f, 0x14, 0xa8, 0x4b, 0xdc, 0x21, 0x7c, 0x32, 0xa7, 0x31,
0x2b, 0x13, 0xed, 0x42, 0x45, 0x0e, 0x39, 0x1d, 0xd4, 0xfe, 0xb3, 0xb4, 0x6b, 0x06, 0xfd, 0x67, 0x7a, 0xb4, 0xf6, 0x13, 0xb8, 0x7f, 0xc3, 0x3f, 0xb5, 0xeb, 0x14, 0x7e, 0x00, 0x19, 0x51, 0xf5,
0x69, 0xd7, 0x60, 0x31, 0xd8, 0x04, 0x80, 0xfb, 0xa6, 0x66, 0xff, 0xac, 0x88, 0x56, 0x23, 0x33, 0x36, 0xa2, 0x5a, 0x91, 0x68, 0x1b, 0x4a, 0x72, 0x9e, 0xeb, 0xa0, 0x76, 0x9f, 0x25, 0x0f, 0x44,
0xd1, 0x69, 0x0c, 0xbd, 0x07, 0x35, 0xc6, 0x69, 0xec, 0x06, 0x4c, 0xf2, 0xd2, 0xb0, 0x2e, 0x44, 0xaf, 0xfb, 0x2c, 0x79, 0x20, 0xb0, 0x98, 0xe1, 0x02, 0xc0, 0x5d, 0x53, 0xb3, 0x7f, 0x51, 0xc4,
0x87, 0x89, 0xd4, 0x97, 0xcb, 0x70, 0x9a, 0xa7, 0x16, 0x6b, 0xf4, 0x3e, 0xd4, 0x19, 0x27, 0x09, 0xab, 0x42, 0xa6, 0xe2, 0x51, 0x61, 0xe8, 0x7f, 0x50, 0x61, 0x9c, 0x46, 0x23, 0x9f, 0x49, 0x5e,
0x17, 0xd6, 0xe9, 0x08, 0xa9, 0x49, 0xd9, 0x61, 0xe8, 0x0e, 0xe8, 0x34, 0x9c, 0xb9, 0xb2, 0x28, 0x1a, 0xd6, 0x85, 0xe8, 0x30, 0x91, 0xfa, 0x72, 0x11, 0x4c, 0xb2, 0xd4, 0x62, 0x8d, 0xfe, 0x0f,
0x42, 0x51, 0xa5, 0xe1, 0xcc, 0x61, 0xe8, 0x1e, 0xd4, 0xe7, 0x49, 0xb4, 0x8c, 0xbd, 0x70, 0x6e, 0x55, 0xc6, 0x49, 0xcc, 0x85, 0x75, 0x32, 0x2d, 0x2b, 0x52, 0x76, 0x18, 0xba, 0x07, 0x3a, 0x0d,
0x55, 0x3b, 0x5a, 0xd7, 0xc0, 0x6b, 0x19, 0xb5, 0x41, 0x9d, 0xac, 0xe4, 0x33, 0x5e, 0xc7, 0xea, 0xa6, 0x23, 0x79, 0x28, 0x42, 0x51, 0xa6, 0xc1, 0xd4, 0x61, 0x68, 0x07, 0xaa, 0xb3, 0x38, 0x5c,
0x64, 0x25, 0xa2, 0x27, 0x24, 0x9c, 0x53, 0x11, 0xa4, 0x96, 0x46, 0x97, 0xb2, 0xc3, 0xec, 0xdf, 0x44, 0x6e, 0x30, 0xb3, 0xca, 0x2d, 0xad, 0x6d, 0xe0, 0x95, 0x8c, 0x9a, 0xa0, 0x8e, 0x97, 0x72,
0x14, 0xa8, 0x1e, 0x2d, 0x96, 0xe1, 0x0b, 0xb4, 0x0b, 0x8d, 0xc0, 0x0b, 0x5d, 0xf1, 0x70, 0x14, 0x62, 0x55, 0xb1, 0x3a, 0x5e, 0x8a, 0xe8, 0x31, 0x09, 0x66, 0x54, 0x04, 0xa9, 0x24, 0xd1, 0xa5,
0x9c, 0x8d, 0xc0, 0x0b, 0xc5, 0xed, 0x75, 0x98, 0xd4, 0x93, 0xab, 0xb5, 0x3e, 0x9b, 0xac, 0x01, 0xec, 0x30, 0xfb, 0x77, 0x05, 0xca, 0x47, 0xf3, 0x45, 0xf0, 0x02, 0xed, 0x42, 0xcd, 0x77, 0x83,
0xb9, 0xca, 0xf4, 0xbd, 0xac, 0x08, 0x9a, 0x2c, 0xc2, 0xbd, 0x72, 0x11, 0x64, 0x82, 0x5e, 0x3f, 0x91, 0x78, 0x23, 0x73, 0xce, 0x86, 0xef, 0x06, 0xa2, 0x75, 0x1d, 0x26, 0xf5, 0xe4, 0x6a, 0xa5,
0x9c, 0x46, 0x33, 0x2f, 0x9c, 0x17, 0x15, 0x10, 0x3f, 0x16, 0xb9, 0xab, 0x26, 0x96, 0x6b, 0xfb, 0x4f, 0x3f, 0x11, 0x3e, 0xb9, 0x4a, 0xf5, 0x9d, 0xf4, 0x10, 0x34, 0x79, 0x08, 0x3b, 0xc5, 0x43,
0x29, 0xd4, 0x73, 0xab, 0x1b, 0x4f, 0xd5, 0x77, 0x43, 0xf1, 0xa1, 0xd8, 0xf8, 0x45, 0xa8, 0xe8, 0x90, 0x09, 0x3a, 0xdd, 0x60, 0x12, 0x4e, 0xdd, 0x60, 0x96, 0x9f, 0x80, 0xf8, 0x9c, 0xc9, 0xaa,
0x36, 0xdc, 0x3a, 0x3e, 0x1f, 0x1e, 0x8c, 0xdd, 0xd2, 0xd7, 0xc2, 0xfe, 0x01, 0x5a, 0x32, 0x23, 0xea, 0x58, 0xae, 0xed, 0xa7, 0x50, 0xcd, 0xac, 0x6e, 0xbc, 0xca, 0xdf, 0xf7, 0xc5, 0xdf, 0x69,
0x9d, 0xfd, 0xd7, 0x87, 0x66, 0x0f, 0xf4, 0xa9, 0x88, 0x90, 0x77, 0xde, 0xf6, 0x8d, 0xdd, 0xe4, 0xed, 0xc3, 0xa4, 0xa2, 0xbb, 0x70, 0xe7, 0xf8, 0xbc, 0x7f, 0x30, 0x1c, 0x15, 0x7e, 0x51, 0xf6,
0x0e, 0xa9, 0xd9, 0xe1, 0xce, 0xeb, 0xeb, 0x5d, 0xe5, 0xd7, 0xeb, 0x5d, 0xe5, 0x8f, 0xeb, 0x5d, 0x8f, 0xd0, 0x90, 0x19, 0xe9, 0xf4, 0xbf, 0xbe, 0xa9, 0x7b, 0xa0, 0x4f, 0x44, 0x84, 0xec, 0xe6,
0xe5, 0x7b, 0x5d, 0x58, 0xc7, 0x93, 0x89, 0x2e, 0x3f, 0x74, 0x9f, 0xff, 0x1d, 0x00, 0x00, 0xff, 0x6d, 0xde, 0xa8, 0x26, 0x73, 0x48, 0xcc, 0x0e, 0xb7, 0x5e, 0x5f, 0xef, 0x2a, 0xbf, 0x5d, 0xef,
0xff, 0x7c, 0x73, 0x0c, 0xa7, 0x01, 0x0a, 0x00, 0x00, 0x2a, 0x7f, 0x5e, 0xef, 0x2a, 0x3f, 0xe8, 0xc2, 0x3a, 0x1a, 0x8f, 0x75, 0xf9, 0x77, 0xfd, 0xe2,
0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0xa1, 0xff, 0x4a, 0xec, 0x0a, 0x00, 0x00,
} }
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
@ -1855,6 +1934,84 @@ func (m *MinimizedTimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
// Marshal encodes the message into a freshly allocated buffer sized exactly
// by Size, filled tail-first by MarshalToSizedBuffer.
func (m *MinimizedTimeSeriesLen) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes the message into the first Size() bytes of dAtA and
// returns the number of bytes written.
func (m *MinimizedTimeSeriesLen) MarshalTo(dAtA []byte) (int, error) {
	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
}
// MarshalToSizedBuffer encodes the message into the tail of dAtA, writing
// fields in reverse order (highest field number first), as gogo-generated
// marshalers do. i is the write cursor moving backwards from len(dAtA); the
// return value is the number of bytes written.
func (m *MinimizedTimeSeriesLen) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	// Unknown fields are appended last on the wire (written first here).
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Histograms) > 0 {
		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22 // key: field 4, wire type 2 (length-delimited)
		}
	}
	if len(m.Exemplars) > 0 {
		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a // key: field 3, wire type 2 (length-delimited)
		}
	}
	if len(m.Samples) > 0 {
		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // key: field 2, wire type 2 (length-delimited)
		}
	}
	if len(m.LabelSymbols) > 0 {
		// Field 1 is packed fixed32: 4 little-endian bytes per element,
		// preceded by a varint byte length and the field key.
		for iNdEx := len(m.LabelSymbols) - 1; iNdEx >= 0; iNdEx-- {
			i -= 4
			encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.LabelSymbols[iNdEx]))
		}
		i = encodeVarintTypes(dAtA, i, uint64(len(m.LabelSymbols)*4))
		i--
		dAtA[i] = 0xa // key: field 1, wire type 2 (packed)
	}
	return len(dAtA) - i, nil
}
func (m *Label) Marshal() (dAtA []byte, err error) { func (m *Label) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
@ -2432,6 +2589,39 @@ func (m *MinimizedTimeSeries) Size() (n int) {
return n return n
} }
// Size returns the proto wire-format encoded size of the message in bytes.
func (m *MinimizedTimeSeriesLen) Size() (n int) {
	if m == nil {
		return 0
	}
	// Field 1 (packed fixed32): one key byte, a varint payload length, and
	// four bytes per element.
	if c := len(m.LabelSymbols); c > 0 {
		n += 1 + sovTypes(uint64(c*4)) + c*4
	}
	// Fields 2-4 (length-delimited messages): one key byte, a varint length,
	// and the payload, per element.
	for idx := range m.Samples {
		sz := m.Samples[idx].Size()
		n += 1 + sz + sovTypes(uint64(sz))
	}
	for idx := range m.Exemplars {
		sz := m.Exemplars[idx].Size()
		n += 1 + sz + sovTypes(uint64(sz))
	}
	for idx := range m.Histograms {
		sz := m.Histograms[idx].Size()
		n += 1 + sz + sovTypes(uint64(sz))
	}
	// Unknown fields are carried through verbatim (len of nil is 0).
	n += len(m.XXX_unrecognized)
	return n
}
func (m *Label) Size() (n int) { func (m *Label) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
@ -3980,6 +4170,211 @@ func (m *MinimizedTimeSeries) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
// Unmarshal decodes the proto wire format in dAtA into m, appending to any
// already-populated repeated fields. It is the generated fast path: a manual
// varint/field loop with bounds checks on every read; unknown fields are
// preserved in XXX_unrecognized.
func (m *MinimizedTimeSeriesLen) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field key (tag) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTypes
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedTimeSeriesLen: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedTimeSeriesLen: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// LabelSymbols: accept either a single fixed32 (wire type 5) or
			// the packed form (wire type 2, 4 bytes per element).
			if wireType == 5 {
				var v uint32
				if (iNdEx + 4) > l {
					return io.ErrUnexpectedEOF
				}
				v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
				iNdEx += 4
				m.LabelSymbols = append(m.LabelSymbols, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTypes
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= int(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthTypes
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					return ErrInvalidLengthTypes
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				// Pre-size the slice only on first use to avoid reallocation.
				var elementCount int
				elementCount = packedLen / 4
				if elementCount != 0 && len(m.LabelSymbols) == 0 {
					m.LabelSymbols = make([]uint32, 0, elementCount)
				}
				for iNdEx < postIndex {
					var v uint32
					if (iNdEx + 4) > l {
						return io.ErrUnexpectedEOF
					}
					v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
					iNdEx += 4
					m.LabelSymbols = append(m.LabelSymbols, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field LabelSymbols", wireType)
			}
		case 2:
			// Samples: length-delimited sub-messages, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Samples = append(m.Samples, Sample{})
			if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Exemplars: length-delimited sub-messages, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Exemplars = append(m.Exemplars, Exemplar{})
			if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Histograms: length-delimited sub-messages, decoded in place.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTypes
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTypes
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthTypes
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Histograms = append(m.Histograms, Histogram{})
			if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain the raw bytes.
			iNdEx = preIndex
			skippy, err := skipTypes(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthTypes
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *Label) Unmarshal(dAtA []byte) error { func (m *Label) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

View file

@ -134,6 +134,7 @@ message TimeSeries {
message MinimizedTimeSeries { message MinimizedTimeSeries {
// Sorted list of label name-value pair references. This list's len is always multiple of 4, // Sorted list of label name-value pair references. This list's len is always multiple of 4,
// packing tuples of (label name offset, label name length, label value offset, label value length). // packing tuples of (label name offset, label name length, label value offset, label value length).
// Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
repeated uint32 label_symbols = 1 [(gogoproto.nullable) = false]; repeated uint32 label_symbols = 1 [(gogoproto.nullable) = false];
// Sorted by time, oldest sample first. // Sorted by time, oldest sample first.
@ -144,6 +145,20 @@ message MinimizedTimeSeries {
// TODO: add metadata // TODO: add metadata
} }
// MinimizedTimeSeriesLen is the "len"-format series variant: label strings
// live in the enclosing MinimizedWriteRequestLen.symbols table and are
// referenced here by fixed32 offsets instead of being inlined.
message MinimizedTimeSeriesLen {
  // Sorted list of label name-value pair references, encoded as 32bit uint. This
  // list's real len is always multiple of 2, label name offset/label value offset.
  // Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
  repeated fixed32 label_symbols = 1;
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
  // TODO: add metadata
}
message Label { message Label {
string name = 1; string name = 1;
string value = 2; string value = 2;

View file

@ -10,10 +10,10 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255 exit 255
fi fi
if ! [[ $(protoc --version) =~ "3.21.12" ]]; then #if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
echo "could not find protoc 3.21.12, is it installed + in PATH?" # echo "could not find protoc 3.21.12, is it installed + in PATH?"
exit 255 # exit 255
fi #fi
# Since we run go install, go mod download, the go.sum will change. # Since we run go install, go mod download, the go.sum will change.
# Make a backup. # Make a backup.

View file

@ -7,7 +7,8 @@ trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
declare -a INSTANCES declare -a INSTANCES
# (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags) # (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags)
INSTANCES+=('sender-v1;;receiver-v1;') INSTANCES+=('sender-v1;;receiver-v1;')
INSTANCES+=('sender-v11;--enable-feature rw-1-1-sender;receiver-v11;--enable-feature rw-1-1-receiver') INSTANCES+=('sender-v11-min32-optimized-varint;--remote-write-format 1;receiver-v11-min32-optimized-varint;--remote-write-format 1')
INSTANCES+=('sender-v11-min-len;--remote-write-format 2;receiver-v11-min-len;--remote-write-format 2')
# ~~~~~~~~~~~~~ # ~~~~~~~~~~~~~

View file

@ -1,5 +1,5 @@
global: global:
scrape_interval: 15s scrape_interval: 5s
external_labels: external_labels:
role: ${SENDER_NAME} role: ${SENDER_NAME}
@ -8,6 +8,8 @@ remote_write:
name: ${RECEIVER_NAME} name: ${RECEIVER_NAME}
metadata_config: metadata_config:
send: false send: false
queue_config:
max_samples_per_send: 5000
scrape_configs: scrape_configs:
${SCRAPE_CONFIGS} ${SCRAPE_CONFIGS}

View file

@ -81,11 +81,11 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint. // Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct { type Client struct {
remoteName string // Used to differentiate clients in metrics. remoteName string // Used to differentiate clients in metrics.
urlString string // url.String() urlString string // url.String()
remoteWrite11 bool // For write clients, ignored for read clients. rwFormat RemoteWriteFormat // For write clients, ignored for read clients.
Client *http.Client Client *http.Client
timeout time.Duration timeout time.Duration
retryOnRateLimit bool retryOnRateLimit bool
@ -96,14 +96,14 @@ type Client struct {
// ClientConfig configures a client. // ClientConfig configures a client.
type ClientConfig struct { type ClientConfig struct {
URL *config_util.URL URL *config_util.URL
RemoteWrite11 bool RemoteWriteFormat RemoteWriteFormat
Timeout model.Duration Timeout model.Duration
HTTPClientConfig config_util.HTTPClientConfig HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig AzureADConfig *azuread.AzureADConfig
Headers map[string]string Headers map[string]string
RetryOnRateLimit bool RetryOnRateLimit bool
} }
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server. // ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@ -165,7 +165,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
httpClient.Transport = otelhttp.NewTransport(t) httpClient.Transport = otelhttp.NewTransport(t)
return &Client{ return &Client{
remoteWrite11: conf.RemoteWrite11, rwFormat: conf.RemoteWriteFormat,
remoteName: name, remoteName: name,
urlString: conf.URL.String(), urlString: conf.URL.String(),
Client: httpClient, Client: httpClient,
@ -211,11 +211,11 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
httpReq.Header.Set("Content-Type", "application/x-protobuf") httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Set("User-Agent", UserAgent) httpReq.Header.Set("User-Agent", UserAgent)
// Set the right header if we're using v1.1 remote write protocol if c.rwFormat == Base1 {
if c.remoteWrite11 {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
} else {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue) httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
} else {
// Set the right header if we're using v1.1 remote write protocol
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
} }
if attempt > 0 { if attempt > 0 {

View file

@ -15,6 +15,7 @@ package remote
import ( import (
"compress/gzip" "compress/gzip"
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -23,6 +24,7 @@ import (
"sort" "sort"
"strings" "strings"
"sync" "sync"
"unsafe"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
@ -752,15 +754,6 @@ func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
return spans return spans
} }
// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
metric := make(model.Metric, len(labelPairs))
for _, l := range labelPairs {
metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
return metric
}
func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels { func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
b := labels.ScratchBuilder{} b := labels.ScratchBuilder{}
for _, l := range labelPairs { for _, l := range labelPairs {
@ -794,6 +787,44 @@ func labelsToUint32Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []u
return result return result
} }
func labelsToUint32SliceLen(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint32) []uint32 {
result := buf[:0]
lbls.Range(func(l labels.Label) {
off := symbolTable.RefLen(l.Name)
result = append(result, off)
off = symbolTable.RefLen(l.Value)
result = append(result, off)
})
return result
}
func Uint32LenRefToLabels(symbols []byte, minLabels []uint32) labels.Labels {
ls := labels.NewScratchBuilder(len(minLabels) / 2)
labelIdx := 0
for labelIdx < len(minLabels) {
// todo, check for overflow?
offset := minLabels[labelIdx]
labelIdx++
length, n := binary.Uvarint(symbols[offset:])
offset += uint32(n)
name := symbols[offset : uint64(offset)+length]
offset = minLabels[labelIdx]
labelIdx++
length, n = binary.Uvarint(symbols[offset:])
offset += uint32(n)
value := symbols[offset : uint64(offset)+length]
ls.Add(yoloString(name), yoloString(value))
}
return ls.Labels()
}
func yoloString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels { func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
ls := labels.NewScratchBuilder(len(minLabels) / 2) ls := labels.NewScratchBuilder(len(minLabels) / 2)
@ -923,10 +954,29 @@ func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, er
return &req, nil return &req, nil
} }
func DecodeMinimizedWriteRequestLen(r io.Reader) (*prompb.MinimizedWriteRequestLen, error) {
compressed, err := io.ReadAll(r)
if err != nil {
return nil, err
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
return nil, err
}
var req prompb.MinimizedWriteRequestLen
if err := proto.Unmarshal(reqBuf, &req); err != nil {
return nil, err
}
return &req, nil
}
func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) { func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) {
req := &prompb.WriteRequest{ req := &prompb.WriteRequest{
Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)), Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
//Metadata: redReq.Metadata, // TODO handle metadata?
} }
for i, rts := range redReq.Timeseries { for i, rts := range redReq.Timeseries {
@ -951,12 +1001,3 @@ func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (
} }
return req, nil return req, nil
} }
// for use with minimized remote write proto format
func packRef(offset, length int) uint32 {
return uint32((offset&0xFFFFF)<<12 | (length & 0xFFF))
}
func unpackRef(ref uint32) (offset, length int) {
return int(ref>>12) & 0xFFFFF, int(ref & 0xFFF)
}

View file

@ -77,7 +77,7 @@ var writeRequestFixture = &prompb.WriteRequest{
// writeRequestMinimizedFixture represents the same request as writeRequestFixture, but using the minimized representation. // writeRequestMinimizedFixture represents the same request as writeRequestFixture, but using the minimized representation.
var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest { var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
st := newRwSymbolTable() st := newRwSymbolTable()
labels := []uint32{} var labels []uint32
for _, s := range []string{ for _, s := range []string{
"__name__", "test_metric1", "__name__", "test_metric1",
"b", "c", "b", "c",
@ -85,8 +85,8 @@ var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
"d", "e", "d", "e",
"foo", "bar", "foo", "bar",
} { } {
off, len := st.Ref(s) off, length := st.Ref(s)
labels = append(labels, off, len) labels = append(labels, off, length)
} }
return &prompb.MinimizedWriteRequest{ return &prompb.MinimizedWriteRequest{
Timeseries: []prompb.MinimizedTimeSeries{ Timeseries: []prompb.MinimizedTimeSeries{
@ -568,13 +568,6 @@ func TestDecodeMinWriteRequest(t *testing.T) {
require.Equal(t, writeRequestMinimizedFixture, actual) require.Equal(t, writeRequestMinimizedFixture, actual)
} }
func TestMinimizedWriteRequestToWriteRequest(t *testing.T) {
actual, err := MinimizedWriteRequestToWriteRequest(writeRequestMinimizedFixture)
require.NoError(t, err)
require.Equal(t, writeRequestFixture, actual)
}
func TestNilHistogramProto(t *testing.T) { func TestNilHistogramProto(t *testing.T) {
// This function will panic if it impromperly handles nil // This function will panic if it impromperly handles nil
// values, causing the test to fail. // values, causing the test to fail.
@ -893,3 +886,11 @@ func (c *mockChunkIterator) Next() bool {
func (c *mockChunkIterator) Err() error { func (c *mockChunkIterator) Err() error {
return nil return nil
} }
func TestLenFormat(t *testing.T) {
r := newRwSymbolTable()
ls := labels.FromStrings("asdf", "qwer", "zxcv", "1234")
encoded := labelsToUint32SliceLen(ls, &r, nil)
decoded := Uint32LenRefToLabels(r.LabelsData(), encoded)
require.Equal(t, ls, decoded)
}

View file

@ -15,6 +15,7 @@ package remote
import ( import (
"context" "context"
"encoding/binary"
"errors" "errors"
"math" "math"
"strconv" "strconv"
@ -25,7 +26,6 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -389,6 +389,14 @@ type WriteClient interface {
Endpoint() string Endpoint() string
} }
type RemoteWriteFormat int64
const (
Base1 RemoteWriteFormat = iota // original map based format
Min32Optimized // two 32bit varint plus marshalling optimization
MinLen // symbols are now just offsets, and we encode lengths as varints in the large symbols string (which is also now a byte slice)
)
// QueueManager manages a queue of samples to be sent to the Storage // QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided WriteClient. Implements writeTo interface // indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher. // used by WAL Watcher.
@ -406,7 +414,7 @@ type QueueManager struct {
watcher *wlog.Watcher watcher *wlog.Watcher
metadataWatcher *MetadataWatcher metadataWatcher *MetadataWatcher
// experimental feature, new remote write proto format // experimental feature, new remote write proto format
internFormat bool rwFormat RemoteWriteFormat
clientMtx sync.RWMutex clientMtx sync.RWMutex
storeClient WriteClient storeClient WriteClient
@ -454,7 +462,7 @@ func NewQueueManager(
sm ReadyScrapeManager, sm ReadyScrapeManager,
enableExemplarRemoteWrite bool, enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool, enableNativeHistogramRemoteWrite bool,
internFormat bool, rwFormat RemoteWriteFormat,
) *QueueManager { ) *QueueManager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
@ -477,7 +485,9 @@ func NewQueueManager(
storeClient: client, storeClient: client,
sendExemplars: enableExemplarRemoteWrite, sendExemplars: enableExemplarRemoteWrite,
sendNativeHistograms: enableNativeHistogramRemoteWrite, sendNativeHistograms: enableNativeHistogramRemoteWrite,
internFormat: internFormat, // TODO: we should eventually set the format via content negotiation,
// so this field would be the desired format, maybe with a fallback?
rwFormat: rwFormat,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
@ -1276,7 +1286,6 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries { func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock() q.batchMtx.Lock()
defer q.batchMtx.Unlock() defer q.batchMtx.Unlock()
select { select {
case batch := <-q.batchQueue: case batch := <-q.batchQueue:
return batch return batch
@ -1363,6 +1372,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
max += int(float64(max) * 0.1) max += int(float64(max) * 0.1)
} }
// TODO we should make an interface for the timeseries type
batchQueue := queue.Chan() batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max) pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData { for i := range pendingData {
@ -1377,6 +1387,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingMinimizedData[i].Samples = []prompb.Sample{{}} pendingMinimizedData[i].Samples = []prompb.Sample{{}}
} }
pendingMinLenData := make([]prompb.MinimizedTimeSeriesLen, max)
for i := range pendingMinLenData {
pendingMinLenData[i].Samples = []prompb.Sample{{}}
}
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() { stop := func() {
if !timer.Stop() { if !timer.Stop() {
@ -1411,17 +1426,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok { if !ok {
return return
} }
if s.qm.internFormat { switch s.qm.rwFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms) case Base1:
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case MinLen:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesLen(&symbolTable, batch, pendingMinLenData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinLenSamples(ctx, pendingMinLenData[:n], symbolTable.LabelsData(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
} }
queue.ReturnForReuse(batch) queue.ReturnForReuse(batch)
stop() stop()
@ -1430,18 +1451,27 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C: case <-timer.C:
batch := queue.Batch() batch := queue.Batch()
if len(batch) > 0 { if len(batch) > 0 {
if s.qm.internFormat { switch s.qm.rwFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms) case Base1:
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case MinLen:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesLen(&symbolTable, batch, pendingMinLenData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMinLenSamples(ctx, pendingMinLenData[:n], symbolTable.LabelsData(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
} }
} }
queue.ReturnForReuse(batch) queue.ReturnForReuse(batch)
@ -1502,7 +1532,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin)) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
} }
func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedTimeSeries, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *[]byte, buf *[]byte) { func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedTimeSeries, labels string, sampleCount, exemplarCount, histogramCount int, pBuf, buf *[]byte) {
begin := time.Now() begin := time.Now()
// Build the ReducedWriteRequest with no metadata. // Build the ReducedWriteRequest with no metadata.
// Failing to build the write request is non-recoverable, since it will // Failing to build the write request is non-recoverable, since it will
@ -1514,6 +1544,18 @@ func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedT
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin)) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
} }
func (s *shards) sendMinLenSamples(ctx context.Context, samples []prompb.MinimizedTimeSeriesLen, labels []byte, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
begin := time.Now()
// Build the ReducedWriteRequest with no metadata.
// Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails.
req, highest, err := buildMinimizedWriteRequestLen(samples, labels, pBuf, buf)
if err == nil {
err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
}
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) { func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) {
if err != nil { if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
@ -1638,6 +1680,42 @@ func populateMinimizedTimeSeries(symbolTable *rwSymbolTable, batch []timeSeries,
return nPendingSamples, nPendingExemplars, nPendingHistograms return nPendingSamples, nPendingExemplars, nPendingHistograms
} }
func populateMinimizedTimeSeriesLen(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesLen, sendExemplars, sendNativeHistograms bool) (int, int, int) {
var nPendingSamples, nPendingExemplars, nPendingHistograms int
for nPending, d := range batch {
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
if sendExemplars {
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
}
if sendNativeHistograms {
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
}
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
// pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
pendingData[nPending].LabelSymbols = labelsToUint32SliceLen(d.seriesLabels, symbolTable, pendingData[nPending].LabelSymbols)
switch d.sType {
case tSample:
pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
Value: d.value,
Timestamp: d.timestamp,
})
nPendingSamples++
// TODO: handle all exemplars
case tHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
nPendingHistograms++
case tFloatHistogram:
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
nPendingHistograms++
}
}
return nPendingSamples, nPendingExemplars, nPendingHistograms
}
func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error { func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error {
backoff := cfg.MinBackoff backoff := cfg.MinBackoff
sleepDuration := model.Duration(0) sleepDuration := model.Duration(0)
@ -1728,7 +1806,7 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
buf = &[]byte{} buf = &[]byte{}
} }
compressed := snappy.Encode(*buf, pBuf.Bytes()) compressed := snappy.Encode(*buf, pBuf.Bytes())
if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); buf != nil && n > len(*buf) { if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
// grow the buffer for the next time // grow the buffer for the next time
*buf = make([]byte, n) *buf = make([]byte, n)
} }
@ -1742,38 +1820,64 @@ type offLenPair struct {
} }
type rwSymbolTable struct { type rwSymbolTable struct {
symbols []byte symbols []byte
symbolsMap map[string]offLenPair symbolsMap map[string]offLenPair
symbolsMapBytes map[string]uint32
} }
func newRwSymbolTable() rwSymbolTable { func newRwSymbolTable() rwSymbolTable {
return rwSymbolTable{ return rwSymbolTable{
symbolsMap: make(map[string]offLenPair), symbolsMap: make(map[string]offLenPair),
symbolsMapBytes: make(map[string]uint32),
} }
} }
func (r *rwSymbolTable) Ref(str string) (off uint32, leng uint32) { func (r *rwSymbolTable) Ref(str string) (uint32, uint32) {
if offlen, ok := r.symbolsMap[str]; ok { if offlen, ok := r.symbolsMap[str]; ok {
return offlen.Off, offlen.Len return offlen.Off, offlen.Len
} }
off, leng = uint32(len(r.symbols)), uint32(len(str)) off, length := uint32(len(r.symbols)), uint32(len(str))
if int(off) > len(r.symbols) {
panic(1)
}
r.symbols = append(r.symbols, str...) r.symbols = append(r.symbols, str...)
r.symbolsMap[str] = offLenPair{off, leng} if len(r.symbols) < int(off+length) {
return panic(2)
}
r.symbolsMap[str] = offLenPair{off, length}
return off, length
}
func (r *rwSymbolTable) RefLen(str string) uint32 {
if ref, ok := r.symbolsMapBytes[str]; ok {
return ref
}
ref := uint32(len(r.symbols))
r.symbols = binary.AppendUvarint(r.symbols, uint64(len(str)))
r.symbols = append(r.symbols, str...)
r.symbolsMapBytes[str] = ref
return ref
} }
func (r *rwSymbolTable) LabelsString() string { func (r *rwSymbolTable) LabelsString() string {
return *((*string)(unsafe.Pointer(&r.symbols))) return *((*string)(unsafe.Pointer(&r.symbols)))
} }
func (r *rwSymbolTable) LabelsData() []byte {
return r.symbols
}
func (r *rwSymbolTable) clear() { func (r *rwSymbolTable) clear() {
for k := range r.symbolsMap { for k := range r.symbolsMap {
delete(r.symbolsMap, k) delete(r.symbolsMap, k)
} }
for k := range r.symbolsMapBytes {
delete(r.symbolsMapBytes, k)
}
r.symbols = r.symbols[:0] r.symbols = r.symbols[:0]
} }
func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels string, pBuf *[]byte, buf *[]byte) ([]byte, int64, error) { func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels string, pBuf, buf *[]byte) ([]byte, int64, error) {
var highest int64 var highest int64
for _, ts := range samples { for _, ts := range samples {
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it. // At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
@ -1811,7 +1915,53 @@ func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels str
} }
compressed := snappy.Encode(*buf, data) compressed := snappy.Encode(*buf, data)
if n := snappy.MaxEncodedLen(len(data)); buf != nil && n > len(*buf) { if n := snappy.MaxEncodedLen(len(data)); n > len(*buf) {
// grow the buffer for the next time
*buf = make([]byte, n)
}
return compressed, highest, nil
}
func buildMinimizedWriteRequestLen(samples []prompb.MinimizedTimeSeriesLen, labels []byte, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
var highest int64
for _, ts := range samples {
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
highest = ts.Samples[0].Timestamp
}
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
highest = ts.Exemplars[0].Timestamp
}
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
highest = ts.Histograms[0].Timestamp
}
}
req := &prompb.MinimizedWriteRequestLen{
Symbols: labels,
Timeseries: samples,
}
if pBuf == nil {
pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
} else {
pBuf.Reset()
}
err := pBuf.Marshal(req)
if err != nil {
return nil, 0, err
}
// snappy uses len() to see if it needs to allocate a new slice. Make the
// buffer as long as possible.
if buf != nil {
*buf = (*buf)[0:cap(*buf)]
} else {
buf = &[]byte{}
}
compressed := snappy.Encode(*buf, pBuf.Bytes())
if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
// grow the buffer for the next time // grow the buffer for the next time
*buf = make([]byte, n) *buf = make([]byte, n)
} }

View file

@ -67,7 +67,7 @@ func TestSampleDelivery(t *testing.T) {
exemplars bool exemplars bool
histograms bool histograms bool
floatHistograms bool floatHistograms bool
remoteWrite11 bool rwFormat RemoteWriteFormat
}{ }{
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, {samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, {samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
@ -75,11 +75,11 @@ func TestSampleDelivery(t *testing.T) {
{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, {samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, {samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
{remoteWrite11: true, samples: true, exemplars: false, histograms: false, name: "interned samples only"}, {rwFormat: Min32Optimized, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
{remoteWrite11: true, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"}, {rwFormat: Min32Optimized, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
{remoteWrite11: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"}, {rwFormat: Min32Optimized, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"}, {rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"}, {rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
} }
// Let's create an even number of send batches so we don't run into the // Let's create an even number of send batches so we don't run into the
@ -106,7 +106,7 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases { for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11) s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.rwFormat)
defer s.Close() defer s.Close()
var ( var (
@ -139,7 +139,7 @@ func TestSampleDelivery(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
qm := s.rws.queues[hash] qm := s.rws.queues[hash]
c := NewTestWriteClient(tc.remoteWrite11) c := NewTestWriteClient(tc.rwFormat)
qm.SetClient(c) qm.SetClient(c)
qm.StoreSeries(series, 0) qm.StoreSeries(series, 0)
@ -170,7 +170,7 @@ func TestSampleDelivery(t *testing.T) {
} }
func TestMetadataDelivery(t *testing.T) { func TestMetadataDelivery(t *testing.T) {
c := NewTestWriteClient(false) c := NewTestWriteClient(Base1)
dir := t.TempDir() dir := t.TempDir()
@ -178,7 +178,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -204,13 +204,12 @@ func TestMetadataDelivery(t *testing.T) {
} }
func TestSampleDeliveryTimeout(t *testing.T) { func TestSampleDeliveryTimeout(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
// Let's send one less sample than batch size, and wait the timeout duration // Let's send one less sample than batch size, and wait the timeout duration
n := 9 n := 9
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
@ -220,7 +219,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -238,9 +237,8 @@ func TestSampleDeliveryTimeout(t *testing.T) {
} }
func TestSampleDeliveryOrder(t *testing.T) { func TestSampleDeliveryOrder(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
ts := 10 ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n) samples := make([]record.RefSample, 0, n)
@ -258,7 +256,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
}) })
} }
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series) c.expectSamples(samples, series)
dir := t.TempDir() dir := t.TempDir()
@ -267,7 +265,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -289,7 +287,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
@ -327,7 +325,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for i := 0; i < numSegments; i++ { for i := 0; i < numSegments; i++ {
series := []record.RefSeries{} series := []record.RefSeries{}
for j := 0; j < numSeries; j++ { for j := 0; j < numSeries; j++ {
@ -341,15 +339,14 @@ func TestSeriesReset(t *testing.T) {
} }
func TestReshard(t *testing.T) { func TestReshard(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
size := 10 // Make bigger to find more races. size := 10 // Make bigger to find more races.
nSeries := 6 nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries) samples, series := createTimeseries(nSamples, nSeries)
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series) c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
@ -359,7 +356,7 @@ func TestReshard(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -385,10 +382,9 @@ func TestReshard(t *testing.T) {
} }
func TestReshardRaceWithStop(t *testing.T) { func TestReshardRaceWithStop(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1" c := NewTestWriteClient(rwFormat)
c := NewTestWriteClient(remoteWrite11)
var m *QueueManager var m *QueueManager
h := sync.Mutex{} h := sync.Mutex{}
@ -400,7 +396,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() { go func() {
for { for {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start() m.Start()
h.Unlock() h.Unlock()
h.Lock() h.Lock()
@ -425,9 +421,8 @@ func TestReshardRaceWithStop(t *testing.T) {
} }
func TestReshardPartialBatch(t *testing.T) { func TestReshardPartialBatch(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
samples, series := createTimeseries(1, 10) samples, series := createTimeseries(1, 10)
c := NewTestBlockedWriteClient() c := NewTestBlockedWriteClient()
@ -440,7 +435,7 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline) cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -472,9 +467,8 @@ func TestReshardPartialBatch(t *testing.T) {
// where a large scrape (> capacity + max samples per send) is appended at the // where a large scrape (> capacity + max samples per send) is appended at the
// same time as a batch times out according to the batch send deadline. // same time as a batch times out according to the batch send deadline.
func TestQueueFilledDeadlock(t *testing.T) { func TestQueueFilledDeadlock(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
samples, series := createTimeseries(50, 1) samples, series := createTimeseries(50, 1)
c := NewNopWriteClient() c := NewNopWriteClient()
@ -490,7 +484,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -515,14 +509,13 @@ func TestQueueFilledDeadlock(t *testing.T) {
} }
func TestReleaseNoninternedString(t *testing.T) { func TestReleaseNoninternedString(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -570,8 +563,9 @@ func TestShouldReshard(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
for _, c := range cases { for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient(false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) client := NewTestWriteClient(Base1)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.numShards = c.startingShards m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn) m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut) m.dataOut.incr(c.samplesOut)
@ -706,16 +700,16 @@ type TestWriteClient struct {
wg sync.WaitGroup wg sync.WaitGroup
mtx sync.Mutex mtx sync.Mutex
buf []byte buf []byte
expectRemoteWrite11 bool rwFormat RemoteWriteFormat
} }
func NewTestWriteClient(expectRemoteWrite11 bool) *TestWriteClient { func NewTestWriteClient(rwFormat RemoteWriteFormat) *TestWriteClient {
return &TestWriteClient{ return &TestWriteClient{
withWaitGroup: true, withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{}, receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{}, expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{}, receivedMetadata: map[string][]prompb.MetricMetadata{},
expectRemoteWrite11: expectRemoteWrite11, rwFormat: rwFormat,
} }
} }
@ -803,6 +797,7 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
c.mtx.Lock() c.mtx.Lock()
defer c.mtx.Unlock() defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples { for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts) require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
} }
@ -831,25 +826,27 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
} }
var reqProto *prompb.WriteRequest var reqProto *prompb.WriteRequest
if c.expectRemoteWrite11 { switch c.rwFormat {
var reqReduced prompb.MinimizedWriteRequest case Base1:
err = proto.Unmarshal(reqBuf, &reqReduced)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqReduced)
}
} else {
reqProto = &prompb.WriteRequest{} reqProto = &prompb.WriteRequest{}
err = proto.Unmarshal(reqBuf, reqProto) err = proto.Unmarshal(reqBuf, reqProto)
case Min32Optimized:
var reqMin prompb.MinimizedWriteRequest
err = proto.Unmarshal(reqBuf, &reqMin)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqMin)
}
} }
if err != nil { if err != nil {
fmt.Println("error: ", err)
return err return err
} }
count := 0 count := 0
for _, ts := range reqProto.Timeseries { for _, ts := range reqProto.Timeseries {
labels := labelProtosToLabels(ts.Labels) ls := labelProtosToLabels(ts.Labels)
seriesName := labels.Get("__name__") seriesName := ls.Get("__name__")
for _, sample := range ts.Samples { for _, sample := range ts.Samples {
count++ count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
@ -860,12 +857,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex) c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
} }
for _, histogram := range ts.Histograms { for _, hist := range ts.Histograms {
count++ count++
if histogram.IsFloatHistogram() { if hist.IsFloatHistogram() {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram) c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], hist)
} else { } else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram) c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], hist)
} }
} }
@ -965,7 +962,8 @@ func BenchmarkSampleSend(b *testing.B) {
dir := b.TempDir() dir := b.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
// These should be received by the client. // These should be received by the client.
@ -1009,9 +1007,10 @@ func BenchmarkStartup(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient() c := NewTestBlockedWriteClient()
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, logger, dir, m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration), newEWMARate(ewmaWeight, shardUpdateDuration),
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false) cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2] m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run() err := m.watcher.Run()
@ -1094,7 +1093,8 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
// Need to start the queue manager so the proper metrics are initialized. // Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual // However we can stop it right away since we don't need to do any actual
@ -1163,7 +1163,7 @@ func TestCalculateDesiredShards(t *testing.T) {
} }
func TestCalculateDesiredShardsDetail(t *testing.T) { func TestCalculateDesiredShardsDetail(t *testing.T) {
c := NewTestWriteClient(false) c := NewTestWriteClient(Base1)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
@ -1171,7 +1171,8 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for _, tc := range []struct { for _, tc := range []struct {
name string name string
@ -1432,9 +1433,9 @@ func createDummyTimeSeries(instances int) []timeSeries {
b := labels.NewBuilder(commonLabels) b := labels.NewBuilder(commonLabels)
b.Set("pod", "prometheus-"+strconv.Itoa(i)) b.Set("pod", "prometheus-"+strconv.Itoa(i))
for _, lbls := range metrics { for _, lbls := range metrics {
for _, l := range lbls { lbls.Range(func(l labels.Label) {
b.Set(l.Name, l.Value) b.Set(l.Name, l.Value)
} })
result = append(result, timeSeries{ result = append(result, timeSeries{
seriesLabels: b.Labels(), seriesLabels: b.Labels(),
value: r.Float64(), value: r.Float64(),
@ -1495,9 +1496,9 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) {
batch []timeSeries batch []timeSeries
} }
testCases := []testcase{ testCases := []testcase{
testcase{createDummyTimeSeries(2)}, {createDummyTimeSeries(2)},
testcase{createDummyTimeSeries(10)}, {createDummyTimeSeries(10)},
testcase{createDummyTimeSeries(100)}, {createDummyTimeSeries(100)},
} }
for _, tc := range testCases { for _, tc := range testCases {
symbolTable := newRwSymbolTable() symbolTable := newRwSymbolTable()

View file

@ -91,7 +91,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)?
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs, RemoteReadConfigs: tc.cfgs,

View file

@ -62,7 +62,7 @@ type Storage struct {
} }
// NewStorage returns a remote.Storage. // NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *Storage { func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *Storage {
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
} }
@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger, logger: logger,
localStartTimeCallback: stCallback, localStartTimeCallback: stCallback,
} }
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11) s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, rwFormat)
return s return s
} }

View file

@ -27,7 +27,8 @@ import (
func TestStorageLifecycle(t *testing.T) { func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -54,7 +55,8 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) { func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{}, GlobalConfig: config.GlobalConfig{},
@ -75,7 +77,8 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) { func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{ GlobalConfig: config.GlobalConfig{
@ -100,7 +103,8 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) { func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{ GlobalConfig: config.GlobalConfig{

View file

@ -65,7 +65,7 @@ type WriteStorage struct {
externalLabels labels.Labels externalLabels labels.Labels
dir string dir string
queues map[string]*QueueManager queues map[string]*QueueManager
remoteWrite11 bool rwFormat RemoteWriteFormat
samplesIn *ewmaRate samplesIn *ewmaRate
flushDeadline time.Duration flushDeadline time.Duration
interner *pool interner *pool
@ -77,13 +77,13 @@ type WriteStorage struct {
} }
// NewWriteStorage creates and runs a WriteStorage. // NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *WriteStorage { func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *WriteStorage {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
rws := &WriteStorage{ rws := &WriteStorage{
queues: make(map[string]*QueueManager), queues: make(map[string]*QueueManager),
remoteWrite11: remoteWrite11, rwFormat: rwFormat,
watcherMetrics: wlog.NewWatcherMetrics(reg), watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg), liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger, logger: logger,
@ -156,14 +156,14 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
} }
c, err := NewWriteClient(name, &ClientConfig{ c, err := NewWriteClient(name, &ClientConfig{
URL: rwConf.URL, URL: rwConf.URL,
RemoteWrite11: rws.remoteWrite11, RemoteWriteFormat: rws.rwFormat,
Timeout: rwConf.RemoteTimeout, Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig, HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config, SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig, AzureADConfig: rwConf.AzureADConfig,
Headers: rwConf.Headers, Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
}) })
if err != nil { if err != nil {
return err return err
@ -200,7 +200,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper, rws.scraper,
rwConf.SendExemplars, rwConf.SendExemplars,
rwConf.SendNativeHistograms, rwConf.SendNativeHistograms,
rws.remoteWrite11, rws.rwFormat,
) )
// Keep track of which queues are new so we know which to start. // Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash) newHashes = append(newHashes, hash)

View file

@ -46,17 +46,17 @@ type writeHandler struct {
// Experimental feature, new remote write proto format // Experimental feature, new remote write proto format
// The handler will accept the new format, but it can still accept the old one // The handler will accept the new format, but it can still accept the old one
enableRemoteWrite11 bool // TODO: this should eventually be via content negotiation
rwFormat RemoteWriteFormat
} }
// NewWriteHandler creates a http.Handler that accepts remote write requests and // NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable. // writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool) http.Handler { func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, rwFormat RemoteWriteFormat) http.Handler {
h := &writeHandler{ h := &writeHandler{
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
enableRemoteWrite11: enableRemoteWrite11, rwFormat: rwFormat,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus", Namespace: "prometheus",
Subsystem: "api", Subsystem: "api",
@ -74,11 +74,16 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error var err error
var req *prompb.WriteRequest var req *prompb.WriteRequest
var reqMin *prompb.MinimizedWriteRequest var reqMin *prompb.MinimizedWriteRequest
var reqMinLen *prompb.MinimizedWriteRequestLen
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue { // TODO: this should eventually be done via content negotiation/looking at the header
reqMin, err = DecodeMinimizedWriteRequest(r.Body) switch h.rwFormat {
} else { case Base1:
req, err = DecodeWriteRequest(r.Body) req, err = DecodeWriteRequest(r.Body)
case Min32Optimized:
reqMin, err = DecodeMinimizedWriteRequest(r.Body)
case MinLen:
reqMinLen, err = DecodeMinimizedWriteRequestLen(r.Body)
} }
if err != nil { if err != nil {
@ -87,11 +92,16 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue { // TODO: this should eventually be done detecting the format version above
err = h.writeMin(r.Context(), reqMin) switch h.rwFormat {
} else { case Base1:
err = h.write(r.Context(), req) err = h.write(r.Context(), req)
case Min32Optimized:
err = h.writeMin(r.Context(), reqMin)
case MinLen:
err = h.writeMinLen(r.Context(), reqMinLen)
} }
switch err { switch err {
case nil: case nil:
case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
@ -320,3 +330,41 @@ func (h *writeHandler) writeMin(ctx context.Context, req *prompb.MinimizedWriteR
return nil return nil
} }
// writeMinLen ingests a MinimizedWriteRequestLen. For each time series it
// resolves the label set from the request-level symbol table, then appends
// the series' samples, exemplars and histograms through a single Appender.
// The appender is rolled back if any append fails and committed otherwise
// (via the deferred func reading the named result).
func (h *writeHandler) writeMinLen(ctx context.Context, req *prompb.MinimizedWriteRequestLen) (err error) {
	app := h.appendable.Appender(ctx)
	oooExemplarCount := 0
	defer func() {
		// On any error, discard the partial batch; otherwise commit it.
		if err != nil {
			_ = app.Rollback()
			return
		}
		err = app.Commit()
	}()

	for _, series := range req.Timeseries {
		lbls := Uint32LenRefToLabels(req.Symbols, series.LabelSymbols)

		if err := h.appendSamples(app, series.Samples, lbls); err != nil {
			return err
		}

		for _, protoExemplar := range series.Exemplars {
			ex := exemplarProtoToExemplar(protoExemplar)
			// Out-of-order exemplars do not abort the request; they are
			// counted here and reported once after the loop.
			h.appendExemplar(app, ex, lbls, &oooExemplarCount)
		}

		if err := h.appendHistograms(app, series.Histograms, lbls); err != nil {
			return err
		}
	}

	if oooExemplarCount > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", oooExemplarCount)
	}
	return nil
}

View file

@ -45,7 +45,8 @@ func TestRemoteWriteHandler(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -57,25 +58,25 @@ func TestRemoteWriteHandler(t *testing.T) {
j := 0 j := 0
k := 0 k := 0
for _, ts := range writeRequestFixture.Timeseries { for _, ts := range writeRequestFixture.Timeseries {
labels := labelProtosToLabels(ts.Labels) ls := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples { for _, s := range ts.Samples {
require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i]) require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
i++ i++
} }
for _, e := range ts.Exemplars { for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels) exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++ j++
} }
for _, hp := range ts.Histograms { for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() { if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp) fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k]) require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else { } else {
h := HistogramProtoToHistogram(hp) h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k]) require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
} }
k++ k++
@ -92,7 +93,8 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(nil, nil, appendable, true) // TODO: test with other proto format(s)
handler := NewWriteHandler(nil, nil, appendable, Min32Optimized)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -145,7 +147,8 @@ func TestOutOfOrderSample(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestSample: 100, latestSample: 100,
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -170,7 +173,8 @@ func TestOutOfOrderExemplar(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestExemplar: 100, latestExemplar: 100,
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -193,8 +197,8 @@ func TestOutOfOrderHistogram(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestHistogram: 100, latestHistogram: 100,
} }
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -205,7 +209,7 @@ func TestOutOfOrderHistogram(t *testing.T) {
func BenchmarkRemoteWritehandler(b *testing.B) { func BenchmarkRemoteWritehandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte" const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
reqs := []*http.Request{} var reqs []*http.Request
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
num := strings.Repeat(strconv.Itoa(i), 16) num := strings.Repeat(strconv.Itoa(i), 16)
buf, _, err := buildWriteRequest([]prompb.TimeSeries{{ buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
@ -222,7 +226,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
} }
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
b.ResetTimer() b.ResetTimer()
@ -231,37 +236,6 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
} }
} }
// TODO(npazosmendez): adapt to minimized version
// func BenchmarkReducedRemoteWriteHandler(b *testing.B) {
// const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
// reqs := []*http.Request{}
// for i := 0; i < b.N; i++ {
// pool := newLookupPool()
// num := strings.Repeat(strconv.Itoa(i), 16)
// buf, _, err := buildReducedWriteRequest([]prompb.ReducedTimeSeries{{
// Labels: []prompb.LabelRef{
// {NameRef: pool.intern("__name__"), ValueRef: pool.intern("test_metric")},
// {NameRef: pool.intern("test_label_name_" + num), ValueRef: pool.intern(labelValue + num)},
// },
// Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
// }}, pool.getTable(), nil, nil)
// require.NoError(b, err)
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// require.NoError(b, err)
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// reqs = append(reqs, req)
// }
// appendable := &mockAppendable{}
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false)
// recorder := httptest.NewRecorder()
// b.ResetTimer()
// for _, req := range reqs {
// handler.ServeHTTP(recorder, req)
// }
// }
func TestCommitErr(t *testing.T) { func TestCommitErr(t *testing.T) {
buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
@ -272,7 +246,8 @@ func TestCommitErr(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
commitErr: fmt.Errorf("commit error"), commitErr: fmt.Errorf("commit error"),
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -297,8 +272,8 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
b.Cleanup(func() { b.Cleanup(func() {
require.NoError(b, db.Close()) require.NoError(b, db.Close())
}) })
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false) handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), Base1)
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil) buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err) require.NoError(b, err)

View file

@ -117,7 +117,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: tc.cfgs, RemoteWriteConfigs: tc.cfgs,
@ -139,7 +140,8 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg) hash, err := toHash(cfg)
require.NoError(t, err) require.NoError(t, err)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
@ -164,7 +166,8 @@ func TestRestartOnNameChange(t *testing.T) {
func TestUpdateWithRegisterer(t *testing.T) { func TestUpdateWithRegisterer(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, Base1)
c1 := &config.RemoteWriteConfig{ c1 := &config.RemoteWriteConfig{
Name: "named", Name: "named",
URL: &common_config.URL{ URL: &common_config.URL{
@ -204,7 +207,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
func TestWriteStorageLifecycle(t *testing.T) { func TestWriteStorageLifecycle(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -221,7 +225,8 @@ func TestWriteStorageLifecycle(t *testing.T) {
func TestUpdateExternalLabels(t *testing.T) { func TestUpdateExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, Base1)
externalLabels := labels.FromStrings("external", "true") externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{ conf := &config.Config{
@ -250,8 +255,8 @@ func TestUpdateExternalLabels(t *testing.T) {
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) { func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{}, GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -276,7 +281,8 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
c0 := &config.RemoteWriteConfig{ c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second), RemoteTimeout: model.Duration(10 * time.Second),

View file

@ -88,7 +88,7 @@ func createTestAgentDB(t *testing.T, reg prometheus.Registerer, opts *Options) *
t.Helper() t.Helper()
dbDir := t.TempDir() dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, remote.Base1)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}) })
@ -584,7 +584,7 @@ func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := log.NewNopLogger() logger := log.NewNopLogger()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false) rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, remote.Base1)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}) })
@ -604,7 +604,7 @@ func TestLockfile(t *testing.T) {
func Test_ExistingWAL_NextRef(t *testing.T) { func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir() dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, remote.Base1)
defer func() { defer func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}() }()

View file

@ -253,8 +253,8 @@ func NewAPI(
registerer prometheus.Registerer, registerer prometheus.Registerer,
statsRenderer StatsRenderer, statsRenderer StatsRenderer,
rwEnabled bool, rwEnabled bool,
rwFormat remote.RemoteWriteFormat,
otlpEnabled bool, otlpEnabled bool,
enableRemoteWrite11 bool,
) *API { ) *API {
a := &API{ a := &API{
QueryEngine: qe, QueryEngine: qe,
@ -296,7 +296,7 @@ func NewAPI(
} }
if rwEnabled { if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11) a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, rwFormat)
} }
if otlpEnabled { if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap) a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)

View file

@ -459,9 +459,10 @@ func TestEndpoints(t *testing.T) {
dbDir := t.TempDir() dbDir := t.TempDir()
// TODO: test with other proto format(s)?
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) { remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil return 0, nil
}, dbDir, 1*time.Second, nil, false) }, dbDir, 1*time.Second, nil, remote.Base1)
err = remote.ApplyConfig(&config.Config{ err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{ RemoteReadConfigs: []*config.RemoteReadConfig{

View file

@ -36,6 +36,7 @@ import (
"github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
) )
@ -136,7 +137,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil, nil,
nil, nil,
false, false,
false, remote.Base1,
false, // Disable experimental reduce remote write proto support. false, // Disable experimental reduce remote write proto support.
) )

View file

@ -58,6 +58,7 @@ import (
"github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/template" "github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1" api_v1 "github.com/prometheus/prometheus/web/api/v1"
@ -242,27 +243,27 @@ type Options struct {
Version *PrometheusVersion Version *PrometheusVersion
Flags map[string]string Flags map[string]string
ListenAddress string ListenAddress string
CORSOrigin *regexp.Regexp CORSOrigin *regexp.Regexp
ReadTimeout time.Duration ReadTimeout time.Duration
MaxConnections int MaxConnections int
ExternalURL *url.URL ExternalURL *url.URL
RoutePrefix string RoutePrefix string
UseLocalAssets bool UseLocalAssets bool
UserAssetsPath string UserAssetsPath string
ConsoleTemplatesPath string ConsoleTemplatesPath string
ConsoleLibrariesPath string ConsoleLibrariesPath string
EnableLifecycle bool EnableLifecycle bool
EnableAdminAPI bool EnableAdminAPI bool
PageTitle string PageTitle string
RemoteReadSampleLimit int RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool EnableOTLPWriteReceiver bool
IsAgent bool IsAgent bool
AppName string AppName string
EnableReceiverRemoteWrite11 bool RemoteWriteFormat remote.RemoteWriteFormat
Gatherer prometheus.Gatherer Gatherer prometheus.Gatherer
Registerer prometheus.Registerer Registerer prometheus.Registerer
@ -352,8 +353,8 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer, o.Registerer,
nil, nil,
o.EnableRemoteWriteReceiver, o.EnableRemoteWriteReceiver,
o.RemoteWriteFormat,
o.EnableOTLPWriteReceiver, o.EnableOTLPWriteReceiver,
o.EnableReceiverRemoteWrite11,
) )
if o.RoutePrefix != "/" { if o.RoutePrefix != "/" {