Remove all code from the previous interning approach

The 'minimized' version is now the only v1.1 version.

Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
This commit is contained in:
Nicolás Pazos 2023-11-09 16:44:18 -03:00
parent 785590ebbf
commit 8426752e0f
20 changed files with 289 additions and 2332 deletions

View file

@ -150,12 +150,11 @@ type flagConfig struct {
featureList []string
// These options are extracted from featureList
// for ease of use.
enableExpandExternalLabels bool
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
enableSenderRemoteWrite11 bool
enableSenderRemoteWrite11Minimized bool
enableExpandExternalLabels bool
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
enableSenderRemoteWrite11 bool
prometheusURL string
corsRegexString string
@ -224,14 +223,8 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "rw-1-1-sender":
c.enableSenderRemoteWrite11 = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
case "rw-1-1-sender-min":
c.enableSenderRemoteWrite11Minimized = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
case "rw-1-1-receiver":
c.web.EnableReceiverRemoteWrite11 = true
case "rw-1-1-receiver-min":
c.web.EnableReceiverRemoteWrite11Min = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be supported on the receiver end, sender can send this new protobuf format.")
default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
}
@ -617,7 +610,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11, cfg.enableSenderRemoteWrite11Minimized)
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)

View file

@ -60,7 +60,7 @@ func (x ReadRequest_ResponseType) String() string {
}
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3, 0}
return fileDescriptor_eefc82927d57d89b, []int{2, 0}
}
type WriteRequest struct {
@ -118,62 +118,6 @@ func (m *WriteRequest) GetMetadata() []MetricMetadata {
return nil
}
type WriteRequestWithRefs struct {
Timeseries []ReducedTimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
// Symbol table for label names/values.
StringSymbolTable map[uint64]string `protobuf:"bytes,4,rep,name=string_symbol_table,json=stringSymbolTable,proto3" json:"string_symbol_table" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WriteRequestWithRefs) Reset() { *m = WriteRequestWithRefs{} }
func (m *WriteRequestWithRefs) String() string { return proto.CompactTextString(m) }
func (*WriteRequestWithRefs) ProtoMessage() {}
func (*WriteRequestWithRefs) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1}
}
func (m *WriteRequestWithRefs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *WriteRequestWithRefs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_WriteRequestWithRefs.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *WriteRequestWithRefs) XXX_Merge(src proto.Message) {
xxx_messageInfo_WriteRequestWithRefs.Merge(m, src)
}
func (m *WriteRequestWithRefs) XXX_Size() int {
return m.Size()
}
func (m *WriteRequestWithRefs) XXX_DiscardUnknown() {
xxx_messageInfo_WriteRequestWithRefs.DiscardUnknown(m)
}
var xxx_messageInfo_WriteRequestWithRefs proto.InternalMessageInfo
func (m *WriteRequestWithRefs) GetTimeseries() []ReducedTimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
func (m *WriteRequestWithRefs) GetStringSymbolTable() map[uint64]string {
if m != nil {
return m.StringSymbolTable
}
return nil
}
type MinimizedWriteRequest struct {
Timeseries []MinimizedTimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
@ -188,7 +132,7 @@ func (m *MinimizedWriteRequest) Reset() { *m = MinimizedWriteRequest{} }
func (m *MinimizedWriteRequest) String() string { return proto.CompactTextString(m) }
func (*MinimizedWriteRequest) ProtoMessage() {}
func (*MinimizedWriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2}
return fileDescriptor_eefc82927d57d89b, []int{1}
}
func (m *MinimizedWriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -249,7 +193,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3}
return fileDescriptor_eefc82927d57d89b, []int{2}
}
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -305,7 +249,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{4}
return fileDescriptor_eefc82927d57d89b, []int{3}
}
func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -355,7 +299,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5}
return fileDescriptor_eefc82927d57d89b, []int{4}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -424,7 +368,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{6}
return fileDescriptor_eefc82927d57d89b, []int{5}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -477,7 +421,7 @@ func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{7}
return fileDescriptor_eefc82927d57d89b, []int{6}
}
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@ -523,8 +467,6 @@ func (m *ChunkedReadResponse) GetQueryIndex() int64 {
func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*WriteRequestWithRefs)(nil), "prometheus.WriteRequestWithRefs")
proto.RegisterMapType((map[uint64]string)(nil), "prometheus.WriteRequestWithRefs.StringSymbolTableEntry")
proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
@ -536,48 +478,41 @@ func init() {
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{
// 655 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4a,
0x14, 0xae, 0x93, 0xb4, 0xc9, 0x3d, 0xe9, 0xad, 0xdc, 0x69, 0xda, 0xfa, 0x46, 0xba, 0x6d, 0x64,
0xdd, 0x45, 0xa4, 0x5e, 0x05, 0x51, 0x2a, 0x40, 0xa8, 0x0b, 0xfa, 0x13, 0xa9, 0x94, 0x9a, 0x9f,
0x71, 0x50, 0x11, 0x42, 0xb2, 0x1c, 0xfb, 0xd0, 0x58, 0x8d, 0x7f, 0xea, 0x19, 0xa3, 0x9a, 0x35,
0x0f, 0xc0, 0x03, 0xf0, 0x34, 0xac, 0xba, 0x42, 0x3c, 0x01, 0x42, 0x7d, 0x12, 0xe4, 0xb1, 0x1d,
0x26, 0x25, 0xaa, 0xd8, 0xcd, 0x9c, 0xef, 0x67, 0xce, 0x7c, 0x67, 0x6c, 0x58, 0x8c, 0xd1, 0x0f,
0x39, 0xf6, 0xa2, 0x38, 0xe4, 0x21, 0x81, 0x28, 0x0e, 0x7d, 0xe4, 0x23, 0x4c, 0x58, 0xbb, 0xc9,
0xd3, 0x08, 0x59, 0x0e, 0xb4, 0x5b, 0x67, 0xe1, 0x59, 0x28, 0x96, 0x77, 0xb2, 0x55, 0x5e, 0xd5,
0x3f, 0x29, 0xb0, 0x78, 0x1a, 0x7b, 0x1c, 0x29, 0x5e, 0x24, 0xc8, 0x38, 0xd9, 0x05, 0xe0, 0x9e,
0x8f, 0x0c, 0x63, 0x0f, 0x99, 0xa6, 0x74, 0xaa, 0xdd, 0xe6, 0xf6, 0x5a, 0xef, 0x97, 0x69, 0x6f,
0xe0, 0xf9, 0x68, 0x0a, 0x74, 0xbf, 0x76, 0xf5, 0x7d, 0x73, 0x8e, 0x4a, 0x7c, 0xb2, 0x0b, 0x0d,
0x1f, 0xb9, 0xed, 0xda, 0xdc, 0xd6, 0xaa, 0x42, 0xdb, 0x96, 0xb5, 0x06, 0xf2, 0xd8, 0x73, 0x8c,
0x82, 0x51, 0xe8, 0x27, 0x8a, 0xe3, 0x5a, 0xa3, 0xa2, 0x56, 0xf5, 0xcf, 0x15, 0x68, 0xc9, 0x2d,
0x9d, 0x7a, 0x7c, 0x44, 0xf1, 0x1d, 0x23, 0x07, 0x33, 0x5a, 0xfb, 0x57, 0xb6, 0xa7, 0xe8, 0x26,
0x0e, 0xba, 0xb7, 0x76, 0xe8, 0xc3, 0x0a, 0xe3, 0xb1, 0x17, 0x9c, 0x59, 0x2c, 0xf5, 0x87, 0xe1,
0xd8, 0xe2, 0xf6, 0x70, 0x8c, 0x5a, 0x4d, 0xb8, 0x3d, 0x90, 0xdd, 0x66, 0xf5, 0xd0, 0x33, 0x85,
0xd6, 0x14, 0xd2, 0x41, 0xa6, 0xec, 0x07, 0x3c, 0x4e, 0x8b, 0x73, 0x96, 0xd9, 0x4d, 0xb4, 0x7d,
0x08, 0x6b, 0xb3, 0x25, 0x44, 0x85, 0xea, 0x39, 0xa6, 0x9a, 0xd2, 0x51, 0xba, 0x35, 0x9a, 0x2d,
0x49, 0x0b, 0xe6, 0xdf, 0xdb, 0xe3, 0x04, 0xb5, 0x4a, 0x47, 0xe9, 0xfe, 0x45, 0xf3, 0xcd, 0xa3,
0xca, 0x43, 0x25, 0x0f, 0xe6, 0xb8, 0xd6, 0xa8, 0xaa, 0x35, 0xfd, 0xa3, 0x02, 0xab, 0x86, 0x17,
0x78, 0xbe, 0xf7, 0x01, 0xdd, 0xa9, 0xd1, 0xf5, 0x67, 0xe4, 0xb3, 0x39, 0x15, 0x7f, 0x29, 0xbb,
0x35, 0x21, 0x0d, 0xea, 0x79, 0x34, 0x4c, 0xab, 0x89, 0x46, 0xca, 0xed, 0x54, 0x1b, 0x5f, 0x15,
0x68, 0x52, 0xb4, 0xdd, 0xf2, 0xf0, 0x2d, 0xa8, 0x5f, 0x24, 0xf2, 0xc9, 0xcb, 0xf2, 0xc9, 0x2f,
0x13, 0x8c, 0x53, 0x5a, 0x32, 0xc8, 0x5b, 0x58, 0xb7, 0x1d, 0x07, 0x23, 0x8e, 0xae, 0x15, 0x23,
0x8b, 0xc2, 0x80, 0xa1, 0x25, 0x1e, 0xab, 0x56, 0xe9, 0x54, 0xbb, 0x4b, 0xdb, 0xff, 0x4d, 0x8f,
0x75, 0x72, 0x4c, 0x8f, 0x16, 0xec, 0x41, 0x1a, 0x21, 0x5d, 0x2d, 0x4d, 0xe4, 0x2a, 0xd3, 0x77,
0x60, 0x51, 0x2e, 0x90, 0x26, 0xd4, 0xcd, 0x3d, 0xe3, 0xc5, 0x49, 0xdf, 0x54, 0xe7, 0xc8, 0x3a,
0xac, 0x98, 0x03, 0xda, 0xdf, 0x33, 0xfa, 0x87, 0xd6, 0xeb, 0xe7, 0xd4, 0x3a, 0x38, 0x7a, 0xf5,
0xec, 0xa9, 0xa9, 0x2a, 0xfa, 0x5e, 0xa6, 0xb2, 0x27, 0x56, 0xe4, 0x2e, 0xd4, 0x63, 0x64, 0xc9,
0x98, 0x97, 0x17, 0x5a, 0xff, 0xfd, 0x42, 0x02, 0xa7, 0x25, 0x4f, 0xff, 0xa2, 0xc0, 0xbc, 0x00,
0xc8, 0xff, 0x40, 0x18, 0xb7, 0x63, 0x6e, 0x89, 0x5c, 0xb9, 0xed, 0x47, 0x96, 0xcf, 0xc4, 0xac,
0xab, 0x54, 0x15, 0xc8, 0xa0, 0x04, 0x0c, 0x46, 0xba, 0xa0, 0x62, 0xe0, 0x4e, 0x73, 0x2b, 0x82,
0xbb, 0x84, 0x81, 0x2b, 0x33, 0x77, 0xa0, 0xe1, 0xdb, 0xdc, 0x19, 0x61, 0xcc, 0x8a, 0xef, 0x4b,
0x93, 0xbb, 0x3a, 0xb1, 0x87, 0x38, 0x36, 0x72, 0x02, 0x9d, 0x30, 0xc9, 0x16, 0xcc, 0x8f, 0xbc,
0x80, 0xe7, 0xf3, 0x6c, 0x6e, 0xaf, 0xde, 0x0c, 0xf7, 0x28, 0x03, 0x69, 0xce, 0xd1, 0xfb, 0xd0,
0x94, 0x2e, 0x47, 0xee, 0xff, 0xf9, 0xff, 0x40, 0x7e, 0x45, 0xfa, 0x25, 0xac, 0x1c, 0x8c, 0x92,
0xe0, 0x3c, 0x1b, 0x8e, 0x94, 0xea, 0x63, 0x58, 0x72, 0xf2, 0xb2, 0x35, 0x65, 0xf9, 0x8f, 0x6c,
0x59, 0x08, 0x0b, 0xd7, 0xbf, 0x1d, 0x79, 0x4b, 0x36, 0xa1, 0x99, 0x3d, 0xa3, 0xd4, 0xf2, 0x02,
0x17, 0x2f, 0x8b, 0x9c, 0x40, 0x94, 0x9e, 0x64, 0x95, 0xfd, 0xd6, 0xd5, 0xf5, 0x86, 0xf2, 0xed,
0x7a, 0x43, 0xf9, 0x71, 0xbd, 0xa1, 0xbc, 0x59, 0xc8, 0x7c, 0xa3, 0xe1, 0x70, 0x41, 0xfc, 0xef,
0xee, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x42, 0x7e, 0x2f, 0x97, 0x2e, 0x05, 0x00, 0x00,
// 543 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0xae, 0xeb, 0xb4, 0x09, 0xe3, 0x10, 0x99, 0x6d, 0x43, 0x4c, 0x0e, 0x49, 0x64, 0x71, 0x88,
0x54, 0x14, 0x44, 0xa8, 0x38, 0xf5, 0x40, 0x5a, 0x22, 0x95, 0x52, 0xf3, 0xb3, 0x09, 0x02, 0x21,
0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0x53, 0xef, 0x5a, 0x6a, 0x38, 0xf3, 0x00, 0x3c, 0x13,
0xa7, 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x0d, 0x20, 0xc4, 0xcd, 0xfb,
0xfd, 0xcd, 0xec, 0xec, 0x18, 0xea, 0x09, 0x06, 0x11, 0xc7, 0x41, 0x9c, 0x44, 0x3c, 0x22, 0x10,
0x27, 0x51, 0x80, 0x7c, 0x8e, 0x29, 0x6b, 0x6b, 0x7c, 0x19, 0x23, 0xcb, 0x89, 0xf6, 0xfe, 0x45,
0x74, 0x11, 0x89, 0xcf, 0xfb, 0xd9, 0x57, 0x8e, 0x9a, 0x9f, 0x15, 0xa8, 0xbf, 0x49, 0x7c, 0x8e,
0x14, 0x2f, 0x53, 0x64, 0x9c, 0x1c, 0x01, 0x70, 0x3f, 0x40, 0x86, 0x89, 0x8f, 0xcc, 0x50, 0x7a,
0x6a, 0x5f, 0x1b, 0xde, 0x1e, 0xfc, 0x0a, 0x1d, 0x4c, 0xfd, 0x00, 0x27, 0x82, 0x3d, 0xae, 0x5c,
0x7f, 0xef, 0x6e, 0x51, 0x49, 0x4f, 0x8e, 0xa0, 0x16, 0x20, 0x77, 0x3c, 0x87, 0x3b, 0x86, 0x2a,
0xbc, 0x6d, 0xd9, 0x6b, 0x21, 0x4f, 0x7c, 0xd7, 0x2a, 0x14, 0x85, 0x7f, 0xed, 0x38, 0xab, 0xd4,
0xb6, 0x75, 0xd5, 0xfc, 0xa4, 0x40, 0xd3, 0xf2, 0x43, 0x3f, 0xf0, 0x3f, 0xa2, 0xb7, 0xd1, 0xdb,
0xf8, 0x2f, 0xbd, 0x75, 0x37, 0xf2, 0x4b, 0xdb, 0x3f, 0x9b, 0x34, 0xa0, 0xca, 0x96, 0xc1, 0x2c,
0x5a, 0x30, 0xa3, 0xd2, 0x53, 0xfa, 0x37, 0x68, 0x79, 0xcc, 0x1b, 0x38, 0xab, 0xd4, 0x54, 0xbd,
0x62, 0x7e, 0x55, 0x40, 0xa3, 0xe8, 0x78, 0x65, 0xf1, 0x03, 0xa8, 0x5e, 0xa6, 0x72, 0xe5, 0x5b,
0x72, 0xe5, 0x57, 0x29, 0x26, 0x4b, 0x5a, 0x2a, 0xc8, 0x7b, 0x68, 0x39, 0xae, 0x8b, 0x31, 0x47,
0xcf, 0x4e, 0x90, 0xc5, 0x51, 0xc8, 0xd0, 0x16, 0xaf, 0x61, 0x6c, 0xf7, 0xd4, 0x7e, 0x63, 0x78,
0x57, 0x36, 0x4b, 0x65, 0x06, 0xb4, 0x50, 0x4f, 0x97, 0x31, 0xd2, 0x66, 0x19, 0x22, 0xa3, 0xcc,
0x3c, 0x84, 0xba, 0x0c, 0x10, 0x0d, 0xaa, 0x93, 0x91, 0xf5, 0xf2, 0x7c, 0x3c, 0xd1, 0xb7, 0x48,
0x0b, 0xf6, 0x26, 0x53, 0x3a, 0x1e, 0x59, 0xe3, 0x27, 0xf6, 0xdb, 0x17, 0xd4, 0x3e, 0x39, 0x7d,
0xfd, 0xfc, 0xd9, 0x44, 0x57, 0xcc, 0x51, 0xe6, 0x72, 0xd6, 0x51, 0xe4, 0x01, 0x54, 0x13, 0x64,
0xe9, 0x82, 0x97, 0x17, 0x6a, 0xfd, 0x79, 0x21, 0xc1, 0xd3, 0x52, 0x67, 0x7e, 0x51, 0x60, 0x47,
0x10, 0xe4, 0x1e, 0x10, 0xc6, 0x9d, 0x84, 0xdb, 0x62, 0xae, 0xdc, 0x09, 0x62, 0x3b, 0xc8, 0x72,
0x94, 0xbe, 0x4a, 0x75, 0xc1, 0x4c, 0x4b, 0xc2, 0x62, 0xa4, 0x0f, 0x3a, 0x86, 0xde, 0xa6, 0x76,
0x5b, 0x68, 0x1b, 0x18, 0x7a, 0xb2, 0xf2, 0x10, 0x6a, 0x81, 0xc3, 0xdd, 0x39, 0x26, 0xac, 0x58,
0x20, 0x43, 0xee, 0xea, 0xdc, 0x99, 0xe1, 0xc2, 0xca, 0x05, 0x74, 0xad, 0x24, 0x07, 0xb0, 0x33,
0xf7, 0x43, 0x9e, 0xbf, 0xa7, 0x36, 0x6c, 0xfe, 0x3e, 0xdc, 0xd3, 0x8c, 0xa4, 0xb9, 0xc6, 0x1c,
0x83, 0x26, 0x5d, 0x8e, 0x3c, 0xfa, 0xff, 0x85, 0x97, 0xb7, 0xc8, 0xbc, 0x82, 0xbd, 0x93, 0x79,
0x1a, 0x7e, 0xc8, 0x1e, 0x47, 0x9a, 0xea, 0x63, 0x68, 0xb8, 0x39, 0x6c, 0x6f, 0x44, 0xde, 0x91,
0x23, 0x0b, 0x63, 0x91, 0x7a, 0xd3, 0x95, 0x8f, 0xa4, 0x0b, 0x5a, 0xb6, 0x46, 0x4b, 0xdb, 0x0f,
0x3d, 0xbc, 0x2a, 0xe6, 0x04, 0x02, 0x7a, 0x9a, 0x21, 0xc7, 0xfb, 0xd7, 0xab, 0x8e, 0xf2, 0x6d,
0xd5, 0x51, 0x7e, 0xac, 0x3a, 0xca, 0xbb, 0xdd, 0x2c, 0x37, 0x9e, 0xcd, 0x76, 0xc5, 0x0f, 0xfd,
0xf0, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x3e, 0xdc, 0x81, 0x0f, 0x04, 0x00, 0x00,
}
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@ -635,64 +570,6 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *WriteRequestWithRefs) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *WriteRequestWithRefs) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *WriteRequestWithRefs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.StringSymbolTable) > 0 {
for k := range m.StringSymbolTable {
v := m.StringSymbolTable[k]
baseI := i
i -= len(v)
copy(dAtA[i:], v)
i = encodeVarintRemote(dAtA, i, uint64(len(v)))
i--
dAtA[i] = 0x12
i = encodeVarintRemote(dAtA, i, uint64(k))
i--
dAtA[i] = 0x8
i = encodeVarintRemote(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0x22
}
}
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *MinimizedWriteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -1026,32 +903,6 @@ func (m *WriteRequest) Size() (n int) {
return n
}
func (m *WriteRequestWithRefs) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Timeseries) > 0 {
for _, e := range m.Timeseries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if len(m.StringSymbolTable) > 0 {
for k, v := range m.StringSymbolTable {
_ = k
_ = v
mapEntrySize := 1 + sovRemote(uint64(k)) + 1 + len(v) + sovRemote(uint64(len(v)))
n += mapEntrySize + 1 + sovRemote(uint64(mapEntrySize))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *MinimizedWriteRequest) Size() (n int) {
if m == nil {
return 0
@ -1309,204 +1160,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *WriteRequestWithRefs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: WriteRequestWithRefs: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: WriteRequestWithRefs: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Timeseries = append(m.Timeseries, ReducedTimeSeries{})
if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StringSymbolTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.StringSymbolTable == nil {
m.StringSymbolTable = make(map[uint64]string)
}
var mapkey uint64
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthRemote
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLengthRemote
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipRemote(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.StringSymbolTable[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRemote(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0

View file

@ -28,16 +28,6 @@ message WriteRequest {
}
message WriteRequestWithRefs {
repeated prometheus.ReducedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
// Cortex uses this field to determine the source of the write request.
// We reserve it to avoid any compatibility issues.
reserved 2;
// Metadata (3) has moved to be part of the TimeSeries type
reserved 3;
// Symbol table for label names/values.
map<uint64, string> string_symbol_table = 4 [(gogoproto.nullable) = false];
}
message MinimizedWriteRequest {
repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
// Cortex uses this field to determine the source of the write request.

File diff suppressed because it is too large Load diff

View file

@ -38,24 +38,6 @@ message MetricMetadata {
string unit = 5;
}
message MetricMetadataRef {
enum MetricType {
UNKNOWN = 0;
COUNTER = 1;
GAUGE = 2;
HISTOGRAM = 3;
GAUGEHISTOGRAM = 4;
SUMMARY = 5;
INFO = 6;
STATESET = 7;
}
// Represents the metric type, these match the set from Prometheus.
// Refer to model/textparse/interface.go for details.
MetricType type = 1;
int64 help_ref = 4;
int64 unit_ref = 5;
}
message Sample {
double value = 1;
// timestamp is in ms format, see model/timestamp/timestamp.go for
@ -72,14 +54,6 @@ message Exemplar {
int64 timestamp = 3;
}
message ExemplarRef {
// Optional, can be empty.
repeated LabelRef labels = 1 [(gogoproto.nullable) = false];
double value = 2;
// timestamp is in ms format, see model/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 3;
}
// A native histogram, also known as a sparse histogram.
// Original design doc:
@ -156,17 +130,6 @@ message TimeSeries {
repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
// TimeSeries represents samples and labels for a single time series.
message ReducedTimeSeries {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
repeated LabelRef labels = 1 [(gogoproto.nullable) = false];
repeated Sample samples = 2 [(gogoproto.nullable) = false];
repeated ExemplarRef exemplars = 3 [(gogoproto.nullable) = false];
repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
MetricMetadataRef metadata = 5 [(gogoproto.nullable) = false];
}
// based on an experiment by marco
message MinimizedTimeSeries {
// Sorted list of label name-value pair references. This list's len is always multiple of 4,
@ -178,11 +141,7 @@ message MinimizedTimeSeries {
repeated Sample samples = 2 [(gogoproto.nullable) = false];
repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
message LabelRef {
uint64 name_ref = 1;
uint64 value_ref = 2;
// TODO: add metadata
}
message Label {

View file

@ -8,7 +8,6 @@ declare -a INSTANCES
# (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags)
INSTANCES+=('sender-v1;;receiver-v1;')
INSTANCES+=('sender-v11;--enable-feature rw-1-1-sender;receiver-v11;--enable-feature rw-1-1-receiver')
INSTANCES+=('sender-v11-min;--enable-feature rw-1-1-sender-min;receiver-v11-min;--enable-feature rw-1-1-receiver-min')
# ~~~~~~~~~~~~~

View file

@ -770,26 +770,6 @@ func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
return b.Labels()
}
func labelRefProtosToLabels(st map[uint64]string, lbls []prompb.LabelRef) labels.Labels {
b := labels.NewScratchBuilder(len(lbls))
for _, l := range lbls {
b.Add(st[l.NameRef], st[l.ValueRef])
}
b.Sort()
return b.Labels()
}
func exemplarRefProtoToExemplar(st map[uint64]string, ep prompb.ExemplarRef) exemplar.Exemplar {
timestamp := ep.Timestamp
return exemplar.Exemplar{
Labels: labelRefProtosToLabels(st, ep.Labels),
Value: ep.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}
}
// labelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
@ -803,20 +783,6 @@ func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label
return result
}
// labelsToLabelsRefProto transforms labels into prompb LabelRefs. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func labelsToLabelRefsProto(lbls labels.Labels, pool *lookupPool, buf []prompb.LabelRef) []prompb.LabelRef {
result := buf[:0]
lbls.Range(func(l labels.Label) {
result = append(result, prompb.LabelRef{
NameRef: pool.intern(l.Name),
ValueRef: pool.intern(l.Value),
})
})
return result
}
func labelsToUint32Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint32) []uint32 {
result := buf[:0]
lbls.Range(func(l labels.Label) {
@ -936,27 +902,6 @@ func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error)
return otlpReq, nil
}
// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
func DecodeReducedWriteRequest(r io.Reader) (*prompb.WriteRequestWithRefs, error) {
compressed, err := io.ReadAll(r)
if err != nil {
return nil, err
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
return nil, err
}
var req prompb.WriteRequestWithRefs
if err := proto.Unmarshal(reqBuf, &req); err != nil {
return nil, err
}
return &req, nil
}
// DecodeMinimizedWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, error) {
@ -978,33 +923,27 @@ func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, er
return &req, nil
}
func ReducedWriteRequestToWriteRequest(redReq *prompb.WriteRequestWithRefs) (*prompb.WriteRequest, error) {
func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) {
req := &prompb.WriteRequest{
Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
//Metadata: redReq.Metadata,
}
for i, rts := range redReq.Timeseries {
lbls := make([]prompb.Label, len(rts.Labels))
for j, l := range rts.Labels {
lbls[j].Name = redReq.StringSymbolTable[l.NameRef]
lbls[j].Value = redReq.StringSymbolTable[l.ValueRef]
}
Uint32RefToLabels(redReq.Symbols, rts.LabelSymbols).Range(func(l labels.Label) {
req.Timeseries[i].Labels = append(req.Timeseries[i].Labels, prompb.Label{
Name: l.Name,
Value: l.Value,
})
})
exemplars := make([]prompb.Exemplar, len(rts.Exemplars))
for j, e := range rts.Exemplars {
exemplars[j].Value = e.Value
exemplars[j].Timestamp = e.Timestamp
exemplars[j].Labels = make([]prompb.Label, len(e.Labels))
for k, l := range e.Labels {
exemplars[j].Labels[k].Name = redReq.StringSymbolTable[l.NameRef]
exemplars[j].Labels[k].Value = redReq.StringSymbolTable[l.ValueRef]
}
exemplars[j].Labels = e.Labels
}
req.Timeseries[i].Labels = lbls
req.Timeseries[i].Samples = rts.Samples
req.Timeseries[i].Exemplars = exemplars
req.Timeseries[i].Histograms = rts.Histograms

View file

@ -74,59 +74,6 @@ var writeRequestFixture = &prompb.WriteRequest{
},
}
// writeRequestWithRefsFixture represents the same request as writeRequestFixture, but using the reduced representation.
var writeRequestWithRefsFixture = &prompb.WriteRequestWithRefs{
StringSymbolTable: map[uint64]string{
// Names
0: "__name__",
2: "b",
4: "baz",
6: "d",
8: "foo",
10: "f",
12: "h",
// Values
1: "test_metric1",
3: "c",
5: "qux",
7: "e",
9: "bar",
11: "g",
13: "i",
},
Timeseries: []prompb.ReducedTimeSeries{
{
Labels: []prompb.LabelRef{
{NameRef: 0, ValueRef: 1},
{NameRef: 2, ValueRef: 3},
{NameRef: 4, ValueRef: 5},
{NameRef: 6, ValueRef: 7},
{NameRef: 8, ValueRef: 9},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.ExemplarRef{{Labels: []prompb.LabelRef{
{NameRef: 10, ValueRef: 11},
}, Value: 1, Timestamp: 0}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())},
},
{
Labels: []prompb.LabelRef{
{NameRef: 0, ValueRef: 1},
{NameRef: 2, ValueRef: 3},
{NameRef: 4, ValueRef: 5},
{NameRef: 6, ValueRef: 7},
{NameRef: 8, ValueRef: 9},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.ExemplarRef{{Labels: []prompb.LabelRef{
{NameRef: 12, ValueRef: 13},
}, Value: 2, Timestamp: 1}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())},
},
},
}
// writeRequestMinimizedFixture represents the same request as writeRequestFixture, but using the minimized representation.
var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
st := newRwSymbolTable()
@ -611,16 +558,6 @@ func TestDecodeWriteRequest(t *testing.T) {
require.Equal(t, writeRequestFixture, actual)
}
func TestDecodeReducedWriteRequest(t *testing.T) {
buf, _, err := buildReducedWriteRequest(writeRequestWithRefsFixture.Timeseries, writeRequestWithRefsFixture.StringSymbolTable, nil, nil)
require.NoError(t, err)
actual, err := DecodeReducedWriteRequest(bytes.NewReader(buf))
require.NoError(t, err)
require.Equal(t, writeRequestWithRefsFixture, actual)
}
func TestDecodeMinWriteRequest(t *testing.T) {
buf, _, err := buildMinimizedWriteRequest(writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil)
@ -631,8 +568,8 @@ func TestDecodeMinWriteRequest(t *testing.T) {
require.Equal(t, writeRequestMinimizedFixture, actual)
}
func TestReducedWriteRequestToWriteRequest(t *testing.T) {
actual, err := ReducedWriteRequestToWriteRequest(writeRequestWithRefsFixture)
func TestMinimizedWriteRequestToWriteRequest(t *testing.T) {
actual, err := MinimizedWriteRequestToWriteRequest(writeRequestMinimizedFixture)
require.NoError(t, err)
require.Equal(t, writeRequestFixture, actual)

View file

@ -100,44 +100,3 @@ func (p *pool) release(s string) {
}
delete(p.pool, s)
}
// lookupPool builds a bidirectional string<->reference table for a single
// remote write request. It must not be used concurrently.
type lookupPool struct {
	nextRef      uint64 // next reference id to hand out
	table        map[uint64]string
	reverseTable map[string]uint64
}

// newLookupPool returns an empty pool ready for interning.
func newLookupPool() *lookupPool {
	p := &lookupPool{}
	p.table = make(map[uint64]string)
	p.reverseTable = make(map[string]uint64)
	return p
}

// intern returns the reference id for s, assigning a fresh sequential id
// the first time a given string is seen.
func (p *lookupPool) intern(s string) uint64 {
	ref, seen := p.reverseTable[s]
	if seen {
		return ref
	}
	ref = p.nextRef
	p.nextRef++
	p.reverseTable[s] = ref
	p.table[ref] = s
	return ref
}

// getTable exposes the ref->string table accumulated so far.
func (p *lookupPool) getTable() map[uint64]string {
	return p.table
}

// clear empties both tables in place (retaining map storage for reuse) and
// resets the id counter so the pool can serve the next request.
func (p *lookupPool) clear() {
	for ref := range p.table {
		delete(p.table, ref)
	}
	for s := range p.reverseTable {
		delete(p.reverseTable, s)
	}
	p.nextRef = 0
}

View file

@ -406,8 +406,7 @@ type QueueManager struct {
watcher *wlog.Watcher
metadataWatcher *MetadataWatcher
// experimental feature, new remote write proto format
internFormat bool
secondInternFormat bool
internFormat bool
clientMtx sync.RWMutex
storeClient WriteClient
@ -456,7 +455,6 @@ func NewQueueManager(
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
internFormat bool,
secondInternFormat bool,
) *QueueManager {
if logger == nil {
logger = log.NewNopLogger()
@ -480,7 +478,6 @@ func NewQueueManager(
sendExemplars: enableExemplarRemoteWrite,
sendNativeHistograms: enableNativeHistogramRemoteWrite,
internFormat: internFormat,
secondInternFormat: secondInternFormat,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
@ -1351,7 +1348,6 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}()
shardNum := strconv.Itoa(shardID)
pool := newLookupPool()
symbolTable := newRwSymbolTable()
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
@ -1376,14 +1372,6 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
}
pendingReducedData := make([]prompb.ReducedTimeSeries, max)
for i := range pendingReducedData {
pendingReducedData[i].Samples = []prompb.Sample{{}}
if s.qm.sendExemplars {
pendingReducedData[i].Exemplars = []prompb.ExemplarRef{{}}
}
}
pendingMinimizedData := make([]prompb.MinimizedTimeSeries, max)
for i := range pendingMinimizedData {
pendingMinimizedData[i].Samples = []prompb.Sample{{}}
@ -1423,18 +1411,12 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok {
return
}
if s.qm.secondInternFormat {
if s.qm.internFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else if s.qm.internFormat && !s.qm.secondInternFormat {
// the new internFormat feature flag is be set
nPendingSamples, nPendingExemplars, nPendingHistograms := populateReducedTimeSeries(pool, batch, pendingReducedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendReducedSamples(ctx, pendingReducedData[:n], pool.table, nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
pool.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
@ -1449,14 +1431,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
batch := queue.Batch()
if len(batch) > 0 {
if s.qm.internFormat {
// the new internFormat feature flag is set
nPendingSamples, nPendingExemplars, nPendingHistograms := populateReducedTimeSeries(pool, batch, pendingReducedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendReducedSamples(ctx, pendingReducedData[:n], pool.table, nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
pool.clear()
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
@ -1523,18 +1502,6 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
func (s *shards) sendReducedSamples(ctx context.Context, samples []prompb.ReducedTimeSeries, labels map[uint64]string, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
begin := time.Now()
// Build the ReducedWriteRequest with no metadata.
// Failing to build the write request is non-recoverable, since it will
// only error if marshaling the proto to bytes fails.
req, highest, err := buildReducedWriteRequest(samples, labels, pBuf, buf)
if err == nil {
err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
}
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedTimeSeries, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *[]byte, buf *[]byte) {
begin := time.Now()
// Build the ReducedWriteRequest with no metadata.
@ -1629,54 +1596,6 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, rawReq []byte, samp
return err
}
// populateReducedTimeSeries fills pendingData with the contents of batch,
// interning every label name and value through pool. It returns the number
// of samples, exemplars and histograms that were queued.
func populateReducedTimeSeries(pool *lookupPool, batch []timeSeries, pendingData []prompb.ReducedTimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var samples, exemplars, histograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}
		// Number of pending samples is limited by the fact that sendSamples
		// (via sendSamplesWithBackoff) retries endlessly, so once we reach max
		// samples, if we can never send to the endpoint we'll stop reading
		// from the queue. This makes it safe to reference the pending series
		// by index.
		ts.Labels = labelsToLabelRefsProto(d.seriesLabels, pool, ts.Labels)
		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			samples++
		case tExemplar:
			// TODO(npazosmendez) optimize?
			refs := make([]prompb.LabelRef, 0, d.exemplarLabels.Len())
			d.exemplarLabels.Range(func(el labels.Label) {
				refs = append(refs, prompb.LabelRef{NameRef: pool.intern(el.Name), ValueRef: pool.intern(el.Value)})
			})
			ts.Exemplars = append(ts.Exemplars, prompb.ExemplarRef{
				Labels:    refs,
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			exemplars++
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			histograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			histograms++
		}
	}
	return samples, exemplars, histograms
}
func populateMinimizedTimeSeries(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
var nPendingSamples, nPendingExemplars, nPendingHistograms int
for nPending, d := range batch {
@ -1825,52 +1744,6 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
return compressed, highest, nil
}
// buildReducedWriteRequest marshals samples and their interned label table
// into a snappy-compressed WriteRequestWithRefs body.
//
// It returns the compressed payload, the highest timestamp observed across
// all series, and any marshaling error. pBuf and buf are optional reusable
// buffers: pass nil (e.g. in tests) and fresh ones are allocated.
func buildReducedWriteRequest(samples []prompb.ReducedTimeSeries, labels map[uint64]string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it, so inspecting index 0 is sufficient.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.WriteRequestWithRefs{
		StringSymbolTable: labels,
		Timeseries:        samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	if err := pBuf.Marshal(req); err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	// buf is guaranteed non-nil here (the else branch above assigns it), so
	// the previous "buf != nil &&" guard was dead; only the size check remains.
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		// grow the buffer for the next time
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}
type offLenPair struct {
Off uint32
Len uint32

View file

@ -62,13 +62,12 @@ func newHighestTimestampMetric() *maxTimestamp {
func TestSampleDelivery(t *testing.T) {
testcases := []struct {
name string
samples bool
exemplars bool
histograms bool
floatHistograms bool
remoteWrite11 bool
remoteWrite11Min bool
name string
samples bool
exemplars bool
histograms bool
floatHistograms bool
remoteWrite11 bool
}{
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
@ -81,12 +80,6 @@ func TestSampleDelivery(t *testing.T) {
{remoteWrite11: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
{remoteWrite11Min: true, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
{remoteWrite11Min: true, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
{remoteWrite11Min: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
{remoteWrite11Min: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
{remoteWrite11Min: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
}
// Let's create an even number of send batches so we don't run into the
@ -113,7 +106,7 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11, tc.remoteWrite11Min)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11)
defer s.Close()
var (
@ -185,7 +178,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m.Start()
defer m.Stop()
@ -227,7 +220,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@ -274,7 +267,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.StoreSeries(series, 0)
m.Start()
@ -296,7 +289,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
@ -334,7 +327,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
for i := 0; i < numSegments; i++ {
series := []record.RefSeries{}
for j := 0; j < numSeries; j++ {
@ -366,7 +359,7 @@ func TestReshard(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.StoreSeries(series, 0)
m.Start()
@ -407,7 +400,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() {
for {
metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.Start()
h.Unlock()
h.Lock()
@ -447,7 +440,7 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.StoreSeries(series, 0)
m.Start()
@ -497,7 +490,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@ -529,7 +522,7 @@ func TestReleaseNoninternedString(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient(remoteWrite11)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11, false)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
m.Start()
defer m.Stop()
@ -578,7 +571,7 @@ func TestShouldReshard(t *testing.T) {
for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient(false)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut)
@ -839,10 +832,10 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
var reqProto *prompb.WriteRequest
if c.expectRemoteWrite11 {
var reqReduced prompb.WriteRequestWithRefs
var reqReduced prompb.MinimizedWriteRequest
err = proto.Unmarshal(reqBuf, &reqReduced)
if err == nil {
reqProto, err = ReducedWriteRequestToWriteRequest(&reqReduced)
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqReduced)
}
} else {
reqProto = &prompb.WriteRequest{}
@ -972,7 +965,7 @@ func BenchmarkSampleSend(b *testing.B) {
dir := b.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m.StoreSeries(series, 0)
// These should be received by the client.
@ -1018,7 +1011,7 @@ func BenchmarkStartup(b *testing.B) {
c := NewTestBlockedWriteClient()
m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
@ -1101,7 +1094,7 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
// Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual
@ -1178,7 +1171,7 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false, false)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
for _, tc := range []struct {
name string
@ -1497,54 +1490,6 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
})
}
// BenchmarkBuildReducedWriteRequest measures populating and marshaling
// reduced-format write requests for differently sized batches, reporting the
// average compressed payload size per op.
func BenchmarkBuildReducedWriteRequest(b *testing.B) {
	bench := func(b *testing.B, batch []timeSeries) {
		pool := newLookupPool()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.ReducedTimeSeries, len(batch))
		for i := range seriesBuff {
			seriesBuff[i].Samples = []prompb.Sample{{}}
			seriesBuff[i].Exemplars = []prompb.ExemplarRef{{}}
		}

		// Warmup buffers so steady-state allocation is what gets measured.
		// Fail fast instead of silently ignoring warmup errors.
		for i := 0; i < 10; i++ {
			populateReducedTimeSeries(pool, batch, seriesBuff, true, true)
			if _, _, err := buildReducedWriteRequest(seriesBuff, pool.getTable(), pBuf, &buff); err != nil {
				b.Fatal(err)
			}
		}

		b.ResetTimer()
		totalSize := 0
		for i := 0; i < b.N; i++ {
			populateReducedTimeSeries(pool, batch, seriesBuff, true, true)
			req, _, err := buildReducedWriteRequest(seriesBuff, pool.getTable(), pBuf, &buff)
			if err != nil {
				b.Fatal(err)
			}
			pool.clear()
			totalSize += len(req)
		}
		// Report once, after the loop: the metric is an average over all ops,
		// so calling ReportMetric per iteration only did redundant work.
		b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
	}

	twoBatch := createDummyTimeSeries(2)
	tenBatch := createDummyTimeSeries(10)
	hundredBatch := createDummyTimeSeries(100)
	b.Run("2 instances", func(b *testing.B) {
		bench(b, twoBatch)
	})
	b.Run("10 instances", func(b *testing.B) {
		bench(b, tenBatch)
	})
	// Previously mislabeled "1k instances"; the batch holds 100 series.
	b.Run("100 instances", func(b *testing.B) {
		bench(b, hundredBatch)
	})
}
func BenchmarkBuildMinimizedWriteRequest(b *testing.B) {
type testcase struct {
batch []timeSeries

View file

@ -91,7 +91,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases {
t.Run("", func(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs,

View file

@ -62,7 +62,7 @@ type Storage struct {
}
// NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool, remoteWrite11Minimized bool) *Storage {
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *Storage {
if l == nil {
l = log.NewNopLogger()
}
@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger,
localStartTimeCallback: stCallback,
}
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11, remoteWrite11Minimized)
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11)
return s
}

View file

@ -29,7 +29,7 @@ import (
func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -56,7 +56,7 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
@ -77,7 +77,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
@ -102,7 +102,7 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{

View file

@ -60,42 +60,40 @@ type WriteStorage struct {
reg prometheus.Registerer
mtx sync.Mutex
watcherMetrics *wlog.WatcherMetrics
liveReaderMetrics *wlog.LiveReaderMetrics
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
remoteWrite11 bool
remoteWrite11Minimized bool
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool
scraper ReadyScrapeManager
quit chan struct{}
watcherMetrics *wlog.WatcherMetrics
liveReaderMetrics *wlog.LiveReaderMetrics
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
remoteWrite11 bool
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool
scraper ReadyScrapeManager
quit chan struct{}
// For timestampTracker.
highestTimestamp *maxTimestamp
}
// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool, remoteWrite11Minimized bool) *WriteStorage {
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *WriteStorage {
if logger == nil {
logger = log.NewNopLogger()
}
rws := &WriteStorage{
queues: make(map[string]*QueueManager),
remoteWrite11: remoteWrite11,
remoteWrite11Minimized: remoteWrite11Minimized,
watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger,
reg: reg,
flushDeadline: flushDeadline,
samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
dir: dir,
interner: newPool(),
scraper: sm,
quit: make(chan struct{}),
queues: make(map[string]*QueueManager),
remoteWrite11: remoteWrite11,
watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger,
reg: reg,
flushDeadline: flushDeadline,
samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),
dir: dir,
interner: newPool(),
scraper: sm,
quit: make(chan struct{}),
highestTimestamp: &maxTimestamp{
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
@ -213,7 +211,6 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
rws.remoteWrite11,
rws.remoteWrite11Minimized,
)
// Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash)

View file

@ -47,18 +47,15 @@ type writeHandler struct {
// Experimental feature, new remote write proto format
// The handler will accept the new format, but it can still accept the old one
enableRemoteWrite11 bool
enableRemoteWrite11Minimized bool
}
// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool, enableRemoteWrite11Minimized bool) http.Handler {
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool) http.Handler {
h := &writeHandler{
logger: logger,
appendable: appendable,
enableRemoteWrite11: enableRemoteWrite11,
enableRemoteWrite11Minimized: enableRemoteWrite11Minimized,
logger: logger,
appendable: appendable,
enableRemoteWrite11: enableRemoteWrite11,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
@ -76,13 +73,10 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
var req *prompb.WriteRequest
var reqWithRefs *prompb.WriteRequestWithRefs
var reqMin *prompb.MinimizedWriteRequest
if h.enableRemoteWrite11Minimized {
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
reqMin, err = DecodeMinimizedWriteRequest(r.Body)
} else if !h.enableRemoteWrite11Minimized && h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
reqWithRefs, err = DecodeReducedWriteRequest(r.Body)
} else {
req, err = DecodeWriteRequest(r.Body)
}
@ -93,10 +87,8 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if h.enableRemoteWrite11Minimized {
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
err = h.writeMin(r.Context(), reqMin)
} else if h.enableRemoteWrite11 {
err = h.writeReduced(r.Context(), reqWithRefs)
} else {
err = h.write(r.Context(), req)
}
@ -294,50 +286,6 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
// writeReduced ingests a reduced-format (v1.1) remote write request: label
// names and values arrive as references into req.StringSymbolTable and are
// resolved back to full label sets before being appended to local storage.
//
// The named return err is consumed by the deferred func below: a non-nil err
// rolls the whole append batch back, otherwise the batch is committed and the
// commit error (if any) becomes the return value. Note the inner `err :=`
// declarations shadow the named result; that is fine because each is returned
// explicitly.
func (h *writeHandler) writeReduced(ctx context.Context, req *prompb.WriteRequestWithRefs) (err error) {
	outOfOrderExemplarErrs := 0
	app := h.appendable.Appender(ctx)
	defer func() {
		if err != nil {
			_ = app.Rollback()
			return
		}
		err = app.Commit()
	}()
	for _, ts := range req.Timeseries {
		// Resolve symbol-table references into a concrete label set.
		labels := labelRefProtosToLabels(req.StringSymbolTable, ts.Labels)
		// TODO(npazosmendez): ?
		// if !labels.IsValid() {
		// 	level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
		// 	samplesWithInvalidLabels++
		// 	continue
		// }
		err := h.appendSamples(app, ts.Samples, labels)
		if err != nil {
			return err
		}
		for _, ep := range ts.Exemplars {
			e := exemplarRefProtoToExemplar(req.StringSymbolTable, ep)
			// NOTE(review): presumably appendExemplar accumulates out-of-order
			// failures into outOfOrderExemplarErrs (passed by pointer) rather
			// than returning them; they are logged once below — confirm.
			h.appendExemplar(app, e, labels, &outOfOrderExemplarErrs)
		}
		err = h.appendHistograms(app, ts.Histograms, labels)
		if err != nil {
			return err
		}
	}
	if outOfOrderExemplarErrs > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	return nil
}
func (h *writeHandler) writeMin(ctx context.Context, req *prompb.MinimizedWriteRequest) (err error) {
outOfOrderExemplarErrs := 0

View file

@ -45,7 +45,7 @@ func TestRemoteWriteHandler(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -83,55 +83,6 @@ func TestRemoteWriteHandler(t *testing.T) {
}
}
func TestRemoteWriteHandlerReducedFormat(t *testing.T) {
buf, _, err := buildReducedWriteRequest(writeRequestWithRefsFixture.Timeseries, writeRequestWithRefsFixture.StringSymbolTable, nil, nil)
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
require.NoError(t, err)
appendable := &mockAppendable{}
handler := NewWriteHandler(nil, nil, appendable, true, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusNoContent, resp.StatusCode)
i := 0
j := 0
k := 0
// the reduced write request is equivalent to the write request fixture.
// we can use it for
for _, ts := range writeRequestFixture.Timeseries {
labels := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples {
require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
}
}
}
func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
buf, _, err := buildMinimizedWriteRequest(writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil)
require.NoError(t, err)
@ -141,7 +92,7 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
handler := NewWriteHandler(nil, nil, appendable, false, true)
handler := NewWriteHandler(nil, nil, appendable, true)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -194,7 +145,7 @@ func TestOutOfOrderSample(t *testing.T) {
appendable := &mockAppendable{
latestSample: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -219,7 +170,7 @@ func TestOutOfOrderExemplar(t *testing.T) {
appendable := &mockAppendable{
latestExemplar: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -243,7 +194,7 @@ func TestOutOfOrderHistogram(t *testing.T) {
latestHistogram: 100,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -271,7 +222,7 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
b.ResetTimer()
@ -280,36 +231,36 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
}
// TODO(npazosmendez): add benchmarks with realistic scenarios
func BenchmarkReducedRemoteWriteHandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
reqs := []*http.Request{}
for i := 0; i < b.N; i++ {
pool := newLookupPool()
num := strings.Repeat(strconv.Itoa(i), 16)
buf, _, err := buildReducedWriteRequest([]prompb.ReducedTimeSeries{{
Labels: []prompb.LabelRef{
{NameRef: pool.intern("__name__"), ValueRef: pool.intern("test_metric")},
{NameRef: pool.intern("test_label_name_" + num), ValueRef: pool.intern(labelValue + num)},
},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
}}, pool.getTable(), nil, nil)
require.NoError(b, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.NoError(b, err)
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
reqs = append(reqs, req)
}
// TODO(npazosmendez): adapt to minimized version
// func BenchmarkReducedRemoteWriteHandler(b *testing.B) {
// const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
// reqs := []*http.Request{}
// for i := 0; i < b.N; i++ {
// pool := newLookupPool()
// num := strings.Repeat(strconv.Itoa(i), 16)
// buf, _, err := buildReducedWriteRequest([]prompb.ReducedTimeSeries{{
// Labels: []prompb.LabelRef{
// {NameRef: pool.intern("__name__"), ValueRef: pool.intern("test_metric")},
// {NameRef: pool.intern("test_label_name_" + num), ValueRef: pool.intern(labelValue + num)},
// },
// Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
// }}, pool.getTable(), nil, nil)
// require.NoError(b, err)
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// require.NoError(b, err)
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// reqs = append(reqs, req)
// }
appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false)
recorder := httptest.NewRecorder()
// appendable := &mockAppendable{}
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false)
// recorder := httptest.NewRecorder()
b.ResetTimer()
for _, req := range reqs {
handler.ServeHTTP(recorder, req)
}
}
// b.ResetTimer()
// for _, req := range reqs {
// handler.ServeHTTP(recorder, req)
// }
// }
func TestCommitErr(t *testing.T) {
buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
@ -321,7 +272,7 @@ func TestCommitErr(t *testing.T) {
appendable := &mockAppendable{
commitErr: fmt.Errorf("commit error"),
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -347,7 +298,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
require.NoError(b, db.Close())
})
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false, false)
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false)
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err)

View file

@ -117,7 +117,7 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
}
for _, tc := range cases {
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false, false)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: tc.cfgs,
@ -139,7 +139,7 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg)
require.NoError(t, err)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false, false)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
@ -164,7 +164,7 @@ func TestRestartOnNameChange(t *testing.T) {
func TestUpdateWithRegisterer(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false, false)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
c1 := &config.RemoteWriteConfig{
Name: "named",
URL: &common_config.URL{
@ -204,7 +204,7 @@ func TestUpdateWithRegisterer(t *testing.T) {
func TestWriteStorageLifecycle(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -221,7 +221,7 @@ func TestWriteStorageLifecycle(t *testing.T) {
func TestUpdateExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false, false)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)
externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{
@ -250,7 +250,7 @@ func TestUpdateExternalLabels(t *testing.T) {
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
@ -276,7 +276,7 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false, false)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second),

View file

@ -255,7 +255,6 @@ func NewAPI(
rwEnabled bool,
otlpEnabled bool,
enableRemoteWrite11 bool,
enableRemoteWrite11Min bool,
) *API {
a := &API{
QueryEngine: qe,
@ -297,7 +296,7 @@ func NewAPI(
}
if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11, enableRemoteWrite11Min)
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)

View file

@ -241,28 +241,27 @@ type Options struct {
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
AppName string
EnableReceiverRemoteWrite11 bool
EnableReceiverRemoteWrite11Min bool
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
AppName string
EnableReceiverRemoteWrite11 bool
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
@ -354,7 +353,6 @@ func New(logger log.Logger, o *Options) *Handler {
o.EnableRemoteWriteReceiver,
o.EnableOTLPWriteReceiver,
o.EnableReceiverRemoteWrite11,
o.EnableReceiverRemoteWrite11Min,
)
if o.RoutePrefix != "/" {