remote-read: streamed chunked server side; Extended protobuf; Added chunked, checksummed reader (#5703)

Part of: https://github.com/prometheus/prometheus/issues/4517 and https://github.com/improbable-eng/thanos/issues/488

Changes:
* Extended protobuf for chunked remote read and negotiation.
* Added checksummed, chunked Writer/Reader.
* Added server-side implementation for chunked, streamed remote-read.


Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
Bartek Płotka 2019-08-19 21:16:10 +01:00 committed by GitHub
parent d7f4183791
commit 48b2c9c8ea
12 changed files with 1936 additions and 224 deletions
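
For context, the wire framing this PR introduces is: a uvarint payload length, then a big-endian uint32 CRC-32 (Castagnoli) of the payload, then the payload itself. A minimal, standard-library-only sketch (not part of the diff) of a single frame:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// encodeFrame mirrors the layout used by ChunkedWriter.Write below:
// uvarint(len(payload)) | big-endian uint32 CRC32-Castagnoli(payload) | payload.
func encodeFrame(payload []byte) []byte {
	var buf bytes.Buffer
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
	buf.Write(lenBuf[:n])
	crc := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
	_ = binary.Write(&buf, binary.BigEndian, crc) // Always 4 bytes for a uint32.
	buf.Write(payload)
	return buf.Bytes()
}

func main() {
	// A 5-byte payload yields a 10-byte frame: 1 (uvarint size) + 4 (CRC) + 5 (data).
	fmt.Printf("%x\n", encodeFrame([]byte("test1")))
}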


@ -213,12 +213,15 @@ func main() {
a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit.").
a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit)
a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit)
a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame)
a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
Default("1h").SetValue(&cfg.outageTolerance)


@ -24,6 +24,43 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type ReadRequest_ResponseType int32
const (
// Server will return a single ReadResponse message with matched series that includes a list of raw samples.
// It's recommended to use streamed response types instead.
//
// Response headers:
// Content-Type: "application/x-protobuf"
// Content-Encoding: "snappy"
ReadRequest_SAMPLES ReadRequest_ResponseType = 0
// Server will stream a delimited ChunkedReadResponse message that contains XOR-encoded chunks for a single series.
// Each message is preceded by its varint-encoded size and a fixed-size, big-endian uint32 CRC32 Castagnoli checksum.
//
// Response headers:
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
// Content-Encoding: ""
ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1
)
var ReadRequest_ResponseType_name = map[int32]string{
0: "SAMPLES",
1: "STREAMED_XOR_CHUNKS",
}
var ReadRequest_ResponseType_value = map[string]int32{
"SAMPLES": 0,
"STREAMED_XOR_CHUNKS": 1,
}
func (x ReadRequest_ResponseType) String() string {
return proto.EnumName(ReadRequest_ResponseType_name, int32(x))
}
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1, 0}
}
type WriteRequest struct {
Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -71,8 +108,15 @@ func (m *WriteRequest) GetTimeseries() []TimeSeries {
return nil
}
// ReadRequest represents a remote read request.
type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
// accepted_response_types allows negotiating the content type of the response.
//
// Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
// implemented by the server, an error is returned.
// For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -118,6 +162,14 @@ func (m *ReadRequest) GetQueries() []*Query {
return nil
}
func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType {
if m != nil {
return m.AcceptedResponseTypes
}
return nil
}
// ReadResponse is a response when response_type equals SAMPLES.
type ReadResponse struct {
// In same order as the request's queries.
Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
@ -285,39 +337,110 @@ func (m *QueryResult) GetTimeseries() []*TimeSeries {
return nil
}
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream series one after another, optionally split by time. This means that a single frame can contain
// a partition of a single series, but once a new series starts streaming, no more chunks will be sent
// for the previous one.
type ChunkedReadResponse struct {
ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"`
// query_index is the index of the query in ReadRequest.queries that these chunks relate to.
QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5}
}
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChunkedReadResponse.Merge(m, src)
}
func (m *ChunkedReadResponse) XXX_Size() int {
return m.Size()
}
func (m *ChunkedReadResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo
func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries {
if m != nil {
return m.ChunkedSeries
}
return nil
}
func (m *ChunkedReadResponse) GetQueryIndex() int64 {
if m != nil {
return m.QueryIndex
}
return 0
}
func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query")
proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult")
proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse")
}
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{
// 333 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbf, 0x4a, 0x2b, 0x41,
0x14, 0xc6, 0xef, 0xdc, 0xfc, 0xbb, 0x9c, 0x0d, 0x97, 0xdc, 0x21, 0x57, 0x97, 0x14, 0x31, 0x6c,
0xb5, 0x10, 0x89, 0x18, 0xc5, 0x42, 0x6c, 0x0c, 0x08, 0x16, 0x49, 0xe1, 0x18, 0x10, 0x6c, 0xc2,
0xc6, 0x1c, 0x92, 0x85, 0xcc, 0xce, 0x66, 0xe6, 0x6c, 0x91, 0xd7, 0xb3, 0x4a, 0xe9, 0x13, 0x88,
0xe4, 0x49, 0x64, 0x67, 0x59, 0x1d, 0xb1, 0xb1, 0x1b, 0xe6, 0xf7, 0xfb, 0x3e, 0xce, 0xe1, 0x40,
0x53, 0xa3, 0x54, 0x84, 0x83, 0x54, 0x2b, 0x52, 0x1c, 0x52, 0xad, 0x24, 0xd2, 0x0a, 0x33, 0xd3,
0xf1, 0x68, 0x9b, 0xa2, 0x29, 0x40, 0xa7, 0xbd, 0x54, 0x4b, 0x65, 0x9f, 0x27, 0xf9, 0xab, 0xf8,
0x0d, 0xc6, 0xd0, 0x7c, 0xd0, 0x31, 0xa1, 0xc0, 0x4d, 0x86, 0x86, 0xf8, 0x15, 0x00, 0xc5, 0x12,
0x0d, 0xea, 0x18, 0x8d, 0xcf, 0x7a, 0x95, 0xd0, 0x1b, 0x1e, 0x0c, 0x3e, 0x3b, 0x07, 0xd3, 0x58,
0xe2, 0xbd, 0xa5, 0xa3, 0xea, 0xee, 0xf5, 0xe8, 0x97, 0x70, 0xfc, 0xe0, 0x12, 0x3c, 0x81, 0xd1,
0xa2, 0x2c, 0xeb, 0x43, 0x63, 0x93, 0xb9, 0x4d, 0xff, 0xdc, 0xa6, 0xbb, 0x0c, 0xf5, 0x56, 0x94,
0x46, 0x70, 0x0d, 0xcd, 0x22, 0x6b, 0x52, 0x95, 0x18, 0xe4, 0xa7, 0xd0, 0xd0, 0x68, 0xb2, 0x35,
0x95, 0xe1, 0xc3, 0xef, 0x61, 0xcb, 0x45, 0xe9, 0x05, 0xcf, 0x0c, 0x6a, 0x16, 0xf0, 0x63, 0xe0,
0x86, 0x22, 0x4d, 0x33, 0x3b, 0x1c, 0x45, 0x32, 0x9d, 0xc9, 0xbc, 0x87, 0x85, 0x15, 0xd1, 0xb2,
0x64, 0x5a, 0x82, 0x89, 0xe1, 0x21, 0xb4, 0x30, 0x59, 0x7c, 0x75, 0x7f, 0x5b, 0xf7, 0x2f, 0x26,
0x0b, 0xd7, 0x3c, 0x87, 0x3f, 0x32, 0xa2, 0xa7, 0x15, 0x6a, 0xe3, 0x57, 0xec, 0x54, 0xbe, 0x3b,
0xd5, 0x38, 0x9a, 0xe3, 0x7a, 0x52, 0x08, 0xe2, 0xc3, 0xe4, 0x7d, 0xa8, 0xad, 0xe2, 0x84, 0x8c,
0x5f, 0xed, 0xb1, 0xd0, 0x1b, 0xfe, 0x77, 0x23, 0xf9, 0xce, 0xb7, 0x39, 0x14, 0x85, 0x13, 0xdc,
0x80, 0xe7, 0x2c, 0xc7, 0x2f, 0x7e, 0x7e, 0x10, 0xf7, 0x14, 0xa3, 0xf6, 0x6e, 0xdf, 0x65, 0x2f,
0xfb, 0x2e, 0x7b, 0xdb, 0x77, 0xd9, 0x63, 0x3d, 0x0f, 0xa4, 0xf3, 0x79, 0xdd, 0x5e, 0xfd, 0xec,
0x3d, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xb6, 0x05, 0x1c, 0x34, 0x02, 0x00, 0x00,
// 466 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
0x10, 0xc6, 0xbb, 0x4d, 0xdb, 0xa0, 0x71, 0x88, 0xc2, 0xb6, 0x25, 0xa6, 0x87, 0x34, 0xb2, 0x38,
0x58, 0x2a, 0x0a, 0x22, 0x54, 0x9c, 0x38, 0x90, 0x96, 0x48, 0x45, 0x24, 0xfc, 0x59, 0x07, 0x81,
0x10, 0x92, 0xe5, 0xd8, 0xa3, 0xc6, 0xa2, 0xfe, 0xd3, 0xdd, 0xb5, 0xd4, 0xbc, 0x1e, 0xa7, 0x9e,
0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0xed, 0xda, 0x0e, 0x1b, 0xb8, 0x70, 0x5b, 0x7f, 0xdf, 0x37,
0x3f, 0xef, 0x8c, 0xc7, 0xd0, 0xe2, 0x98, 0x64, 0x12, 0x07, 0x39, 0xcf, 0x64, 0x46, 0x21, 0xe7,
0x59, 0x82, 0x72, 0x81, 0x85, 0x38, 0xb2, 0xe4, 0x32, 0x47, 0x51, 0x1a, 0x47, 0x07, 0x97, 0xd9,
0x65, 0xa6, 0x8f, 0x8f, 0xd5, 0xa9, 0x54, 0x9d, 0x09, 0xb4, 0x3e, 0xf2, 0x58, 0x22, 0xc3, 0xeb,
0x02, 0x85, 0xa4, 0xcf, 0x01, 0x64, 0x9c, 0xa0, 0x40, 0x1e, 0xa3, 0xb0, 0x49, 0xbf, 0xe1, 0x5a,
0xc3, 0xfb, 0x83, 0x3f, 0xcc, 0xc1, 0x2c, 0x4e, 0xd0, 0xd3, 0xee, 0xd9, 0xce, 0xed, 0xcf, 0xe3,
0x2d, 0x66, 0xe4, 0x9d, 0xef, 0x04, 0x2c, 0x86, 0x41, 0x54, 0xd3, 0x4e, 0xa0, 0x79, 0x5d, 0x98,
0xa8, 0x7b, 0x26, 0xea, 0x7d, 0x81, 0x7c, 0xc9, 0xea, 0x04, 0xfd, 0x02, 0xdd, 0x20, 0x0c, 0x31,
0x97, 0x18, 0xf9, 0x1c, 0x45, 0x9e, 0xa5, 0x02, 0x7d, 0xdd, 0x81, 0xbd, 0xdd, 0x6f, 0xb8, 0xed,
0xe1, 0x43, 0xb3, 0xd8, 0x78, 0xcd, 0x80, 0x55, 0xe9, 0xd9, 0x32, 0x47, 0x76, 0x58, 0x43, 0x4c,
0x55, 0x38, 0xa7, 0xd0, 0x32, 0x05, 0x6a, 0x41, 0xd3, 0x1b, 0x4d, 0xdf, 0x4d, 0xc6, 0x5e, 0x67,
0x8b, 0x76, 0x61, 0xdf, 0x9b, 0xb1, 0xf1, 0x68, 0x3a, 0x7e, 0xe9, 0x7f, 0x7a, 0xcb, 0xfc, 0xf3,
0x8b, 0x0f, 0x6f, 0x5e, 0x7b, 0x1d, 0xe2, 0x8c, 0x54, 0x55, 0xb0, 0x46, 0xd1, 0x27, 0xd0, 0xe4,
0x28, 0x8a, 0x2b, 0x59, 0x37, 0xd4, 0xfd, 0xb7, 0x21, 0xed, 0xb3, 0x3a, 0xe7, 0x7c, 0x23, 0xb0,
0xab, 0x0d, 0xfa, 0x08, 0xa8, 0x90, 0x01, 0x97, 0xbe, 0x9e, 0x98, 0x0c, 0x92, 0xdc, 0x4f, 0x14,
0x87, 0xb8, 0x0d, 0xd6, 0xd1, 0xce, 0xac, 0x36, 0xa6, 0x82, 0xba, 0xd0, 0xc1, 0x34, 0xda, 0xcc,
0x6e, 0xeb, 0x6c, 0x1b, 0xd3, 0xc8, 0x4c, 0x9e, 0xc2, 0x9d, 0x24, 0x90, 0xe1, 0x02, 0xb9, 0xb0,
0x1b, 0xfa, 0x56, 0xb6, 0x79, 0xab, 0x49, 0x30, 0xc7, 0xab, 0x69, 0x19, 0x60, 0xeb, 0x24, 0x3d,
0x81, 0xdd, 0x45, 0x9c, 0x4a, 0x61, 0xef, 0xf4, 0x89, 0x6b, 0x0d, 0x0f, 0xff, 0x1e, 0xee, 0x85,
0x32, 0x59, 0x99, 0x71, 0xc6, 0x60, 0x19, 0xcd, 0xd1, 0x67, 0xff, 0xbf, 0x25, 0x1b, 0xfb, 0x71,
0x03, 0xfb, 0xe7, 0x8b, 0x22, 0xfd, 0xaa, 0x3e, 0x8e, 0x31, 0xd5, 0x17, 0xd0, 0x0e, 0x4b, 0xd9,
0xdf, 0x40, 0x3e, 0x30, 0x91, 0x55, 0x61, 0x45, 0xbd, 0x1b, 0x9a, 0x8f, 0xf4, 0x18, 0x2c, 0xb5,
0x46, 0x4b, 0x3f, 0x4e, 0x23, 0xbc, 0xa9, 0xe6, 0x04, 0x5a, 0x7a, 0xa5, 0x94, 0xb3, 0x83, 0xdb,
0x55, 0x8f, 0xfc, 0x58, 0xf5, 0xc8, 0xaf, 0x55, 0x8f, 0x7c, 0xde, 0x53, 0xdc, 0x7c, 0x3e, 0xdf,
0xd3, 0x3f, 0xc1, 0xd3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xb6, 0x6b, 0xcd, 0x43, 0x03,
0x00, 0x00,
}
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@ -385,6 +508,24 @@ func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.AcceptedResponseTypes) > 0 {
dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10)
var j1 int
for _, num := range m.AcceptedResponseTypes {
for num >= 1<<7 {
dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j1++
}
dAtA2[j1] = uint8(num)
j1++
}
i -= j1
copy(dAtA[i:], dAtA2[:j1])
i = encodeVarintRemote(dAtA, i, uint64(j1))
i--
dAtA[i] = 0x12
}
if len(m.Queries) > 0 {
for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- {
{
@ -547,6 +688,52 @@ func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.QueryIndex != 0 {
i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex))
i--
dAtA[i] = 0x10
}
if len(m.ChunkedSeries) > 0 {
for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintRemote(dAtA []byte, offset int, v uint64) int {
offset -= sovRemote(v)
base := offset
@ -588,6 +775,13 @@ func (m *ReadRequest) Size() (n int) {
n += 1 + l + sovRemote(uint64(l))
}
}
if len(m.AcceptedResponseTypes) > 0 {
l = 0
for _, e := range m.AcceptedResponseTypes {
l += sovRemote(uint64(e))
}
n += 1 + sovRemote(uint64(l)) + l
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@ -658,6 +852,27 @@ func (m *QueryResult) Size() (n int) {
return n
}
func (m *ChunkedReadResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ChunkedSeries) > 0 {
for _, e := range m.ChunkedSeries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
if m.QueryIndex != 0 {
n += 1 + sovRemote(uint64(m.QueryIndex))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovRemote(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -815,6 +1030,75 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 2:
if wireType == 0 {
var v ReadRequest_ResponseType
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= ReadRequest_ResponseType(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 {
m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount)
}
for iNdEx < postIndex {
var v ReadRequest_ResponseType
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= ReadRequest_ResponseType(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipRemote(dAtA[iNdEx:])
@ -1178,6 +1462,113 @@ func (m *QueryResult) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ChunkedReadResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ChunkedReadResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ChunkedSeries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRemote
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRemote
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ChunkedSeries = append(m.ChunkedSeries, &ChunkedSeries{})
if err := m.ChunkedSeries[len(m.ChunkedSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field QueryIndex", wireType)
}
m.QueryIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRemote
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.QueryIndex |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipRemote(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthRemote
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipRemote(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0


@ -23,10 +23,36 @@ message WriteRequest {
repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
}
// ReadRequest represents a remote read request.
message ReadRequest {
repeated Query queries = 1;
enum ResponseType {
// Server will return a single ReadResponse message with matched series that includes a list of raw samples.
// It's recommended to use streamed response types instead.
//
// Response headers:
// Content-Type: "application/x-protobuf"
// Content-Encoding: "snappy"
SAMPLES = 0;
// Server will stream a delimited ChunkedReadResponse message that contains XOR-encoded chunks for a single series.
// Each message is preceded by its varint-encoded size and a fixed-size, big-endian uint32 CRC32 Castagnoli checksum.
//
// Response headers:
// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
// Content-Encoding: ""
STREAMED_XOR_CHUNKS = 1;
}
// accepted_response_types allows negotiating the content type of the response.
//
// Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
// implemented by the server, an error is returned.
// For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
repeated ResponseType accepted_response_types = 2;
}
// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
// In same order as the request's queries.
repeated QueryResult results = 1;
@ -43,3 +69,14 @@ message QueryResult {
// Samples within a time series must be ordered by time.
repeated prometheus.TimeSeries timeseries = 1;
}
// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream series one after another, optionally split by time. This means that a single frame can contain
// a partition of a single series, but once a new series starts streaming, no more chunks will be sent
// for the previous one.
message ChunkedReadResponse {
repeated prometheus.ChunkedSeries chunked_series = 1;
// query_index is the index of the query in ReadRequest.queries that these chunks relate to.
int64 query_index = 2;
}
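
To make the negotiation concrete, here is a hedged client-side sketch (not from this PR; the endpoint URL, time range, and matcher are placeholders) that requests the streamed type and falls back to SAMPLES on older servers. Remote read requests remain snappy-compressed protobufs:

package client

import (
	"bytes"
	"net/http"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

func buildStreamedReadRequest() (*http.Request, error) {
	req := &prompb.ReadRequest{
		Queries: []*prompb.Query{{
			StartTimestampMs: 0,
			EndTimestampMs:   1565000000000, // Placeholder range end.
			Matchers: []*prompb.LabelMatcher{
				{Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "up"},
			},
		}},
		// Ordered by preference: streamed chunks first, raw samples as fallback.
		AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{
			prompb.ReadRequest_STREAMED_XOR_CHUNKS,
			prompb.ReadRequest_SAMPLES,
		},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		return nil, err
	}
	httpReq, err := http.NewRequest("POST", "http://localhost:9090/api/v1/read", bytes.NewReader(snappy.Encode(nil, data)))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("Content-Encoding", "snappy")
	return httpReq, nil
}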


@ -56,6 +56,32 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{4, 0}
}
// We require this to match chunkenc.Encoding.
type Chunk_Encoding int32
const (
Chunk_UNKNOWN Chunk_Encoding = 0
Chunk_XOR Chunk_Encoding = 1
)
var Chunk_Encoding_name = map[int32]string{
0: "UNKNOWN",
1: "XOR",
}
var Chunk_Encoding_value = map[string]int32{
"UNKNOWN": 0,
"XOR": 1,
}
func (x Chunk_Encoding) String() string {
return proto.EnumName(Chunk_Encoding_name, int32(x))
}
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{6, 0}
}
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
@ -111,6 +137,7 @@ func (m *Sample) GetTimestamp() int64 {
return 0
}
// TimeSeries represents samples and labels for a single time series.
type TimeSeries struct {
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
@ -403,44 +430,185 @@ func (m *ReadHints) GetEndMs() int64 {
return 0
}
// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
type Chunk struct {
MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"`
MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"`
Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{6}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Chunk.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Chunk) XXX_Merge(src proto.Message) {
xxx_messageInfo_Chunk.Merge(m, src)
}
func (m *Chunk) XXX_Size() int {
return m.Size()
}
func (m *Chunk) XXX_DiscardUnknown() {
xxx_messageInfo_Chunk.DiscardUnknown(m)
}
var xxx_messageInfo_Chunk proto.InternalMessageInfo
func (m *Chunk) GetMinTimeMs() int64 {
if m != nil {
return m.MinTimeMs
}
return 0
}
func (m *Chunk) GetMaxTimeMs() int64 {
if m != nil {
return m.MaxTimeMs
}
return 0
}
func (m *Chunk) GetType() Chunk_Encoding {
if m != nil {
return m.Type
}
return Chunk_UNKNOWN
}
func (m *Chunk) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
// ChunkedSeries represents a single, encoded time series.
type ChunkedSeries struct {
// Labels should be sorted.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
// Chunks will be in start time order and may overlap.
Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{7}
}
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ChunkedSeries) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChunkedSeries.Merge(m, src)
}
func (m *ChunkedSeries) XXX_Size() int {
return m.Size()
}
func (m *ChunkedSeries) XXX_DiscardUnknown() {
xxx_messageInfo_ChunkedSeries.DiscardUnknown(m)
}
var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo
func (m *ChunkedSeries) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
func (m *ChunkedSeries) GetChunks() []Chunk {
if m != nil {
return m.Chunks
}
return nil
}
func init() {
proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value)
proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value)
proto.RegisterType((*Sample)(nil), "prometheus.Sample")
proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries")
proto.RegisterType((*Label)(nil), "prometheus.Label")
proto.RegisterType((*Labels)(nil), "prometheus.Labels")
proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher")
proto.RegisterType((*ReadHints)(nil), "prometheus.ReadHints")
proto.RegisterType((*Chunk)(nil), "prometheus.Chunk")
proto.RegisterType((*ChunkedSeries)(nil), "prometheus.ChunkedSeries")
}
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{
// 379 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4f, 0xef, 0xd2, 0x40,
0x14, 0x64, 0xdb, 0xb2, 0x95, 0x87, 0x31, 0x75, 0x83, 0xb1, 0x1a, 0x45, 0xd2, 0x53, 0x4f, 0x25,
0xe0, 0xc9, 0xc4, 0x13, 0x49, 0x13, 0x0f, 0xd4, 0x84, 0x85, 0x93, 0x17, 0xb3, 0xc0, 0x13, 0x6a,
0xfa, 0x67, 0xed, 0x2e, 0x26, 0x7c, 0x10, 0xbf, 0x13, 0x47, 0x3f, 0x81, 0x31, 0x7c, 0x12, 0xb3,
0x5b, 0x10, 0x12, 0xbd, 0xfc, 0x6e, 0x6f, 0xe6, 0xcd, 0x74, 0xe6, 0x35, 0x0b, 0x7d, 0x7d, 0x94,
0xa8, 0x12, 0xd9, 0xd4, 0xba, 0x66, 0x20, 0x9b, 0xba, 0x44, 0xbd, 0xc7, 0x83, 0x7a, 0x39, 0xd8,
0xd5, 0xbb, 0xda, 0xd2, 0x63, 0x33, 0xb5, 0x8a, 0xe8, 0x3d, 0xd0, 0xa5, 0x28, 0x65, 0x81, 0x6c,
0x00, 0xdd, 0xef, 0xa2, 0x38, 0x60, 0x48, 0x46, 0x24, 0x26, 0xbc, 0x05, 0xec, 0x15, 0xf4, 0x74,
0x5e, 0xa2, 0xd2, 0xa2, 0x94, 0xa1, 0x33, 0x22, 0xb1, 0xcb, 0x6f, 0x44, 0xf4, 0x0d, 0x60, 0x95,
0x97, 0xb8, 0xc4, 0x26, 0x47, 0xc5, 0xc6, 0x40, 0x0b, 0xb1, 0xc6, 0x42, 0x85, 0x64, 0xe4, 0xc6,
0xfd, 0xe9, 0xd3, 0xe4, 0x16, 0x9f, 0xcc, 0xcd, 0x66, 0xe6, 0x9d, 0x7e, 0xbd, 0xe9, 0xf0, 0x8b,
0x8c, 0x4d, 0xc1, 0x57, 0x36, 0x5c, 0x85, 0x8e, 0x75, 0xb0, 0x7b, 0x47, 0xdb, 0xeb, 0x62, 0xb9,
0x0a, 0xa3, 0x09, 0x74, 0xed, 0xa7, 0x18, 0x03, 0xaf, 0x12, 0x65, 0x5b, 0xb7, 0xc7, 0xed, 0x7c,
0xbb, 0xc1, 0xb1, 0x64, 0x0b, 0xa2, 0x77, 0x40, 0xe7, 0x6d, 0xe0, 0x43, 0x1b, 0x46, 0x3f, 0x08,
0x3c, 0xb6, 0x7c, 0x26, 0xf4, 0x66, 0x8f, 0x0d, 0x9b, 0x80, 0x67, 0x7e, 0xb0, 0x4d, 0x7d, 0x32,
0x7d, 0xfd, 0x8f, 0xff, 0xa2, 0x4b, 0x56, 0x47, 0x89, 0xdc, 0x4a, 0xff, 0x16, 0x75, 0xfe, 0x57,
0xd4, 0xbd, 0x2f, 0x1a, 0x83, 0x67, 0x7c, 0x8c, 0x82, 0x93, 0x2e, 0x82, 0x0e, 0xf3, 0xc1, 0xfd,
0x98, 0x2e, 0x02, 0x62, 0x08, 0x9e, 0x06, 0x8e, 0x25, 0x78, 0x1a, 0xb8, 0xd1, 0x57, 0xe8, 0x71,
0x14, 0xdb, 0x0f, 0x79, 0xa5, 0x15, 0x7b, 0x0e, 0xbe, 0xd2, 0x28, 0x3f, 0x97, 0xca, 0xd6, 0x72,
0x39, 0x35, 0x30, 0x53, 0x26, 0xf9, 0xcb, 0xa1, 0xda, 0x5c, 0x93, 0xcd, 0xcc, 0x5e, 0xc0, 0x23,
0xa5, 0x45, 0xa3, 0x8d, 0xda, 0xb5, 0x6a, 0xdf, 0xe2, 0x4c, 0xb1, 0x67, 0x40, 0xb1, 0xda, 0x9a,
0x85, 0x67, 0x17, 0x5d, 0xac, 0xb6, 0x99, 0x9a, 0x0d, 0x4e, 0xe7, 0x21, 0xf9, 0x79, 0x1e, 0x92,
0xdf, 0xe7, 0x21, 0xf9, 0x44, 0xcd, 0xc5, 0x72, 0xbd, 0xa6, 0xf6, 0xfd, 0xbc, 0xfd, 0x13, 0x00,
0x00, 0xff, 0xff, 0xe3, 0x8a, 0x88, 0x84, 0x70, 0x02, 0x00, 0x00,
// 496 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0xed, 0xd8, 0x89, 0xd3, 0xdc, 0xf4, 0xfb, 0xe4, 0x8e, 0x82, 0x08, 0x15, 0x84, 0xc8, 0xab,
0xac, 0x1c, 0x35, 0xac, 0x90, 0x58, 0x15, 0x59, 0x42, 0xa2, 0x4e, 0xd5, 0x69, 0x11, 0x88, 0x4d,
0x35, 0x89, 0x87, 0xc4, 0x90, 0x19, 0xbb, 0x9e, 0x09, 0x6a, 0x1f, 0x84, 0xc7, 0xe0, 0x3d, 0xba,
0xe4, 0x09, 0x10, 0xca, 0x93, 0xa0, 0xb9, 0xb6, 0xeb, 0x48, 0x65, 0x03, 0xbb, 0xfb, 0x73, 0xce,
0x9c, 0x93, 0x93, 0x6b, 0xe8, 0x99, 0xdb, 0x5c, 0xe8, 0x30, 0x2f, 0x32, 0x93, 0x51, 0xc8, 0x8b,
0x4c, 0x0a, 0xb3, 0x12, 0x1b, 0x7d, 0xd4, 0x5f, 0x66, 0xcb, 0x0c, 0xc7, 0x13, 0x5b, 0x95, 0x88,
0xe0, 0x15, 0x78, 0x17, 0x5c, 0xe6, 0x6b, 0x41, 0xfb, 0xd0, 0xfe, 0xca, 0xd7, 0x1b, 0x31, 0x20,
0x23, 0x32, 0x26, 0xac, 0x6c, 0xe8, 0x53, 0xe8, 0x9a, 0x54, 0x0a, 0x6d, 0xb8, 0xcc, 0x07, 0xce,
0x88, 0x8c, 0x5d, 0xd6, 0x0c, 0x82, 0x6b, 0x80, 0xcb, 0x54, 0x8a, 0x0b, 0x51, 0xa4, 0x42, 0xd3,
0x09, 0x78, 0x6b, 0x3e, 0x17, 0x6b, 0x3d, 0x20, 0x23, 0x77, 0xdc, 0x9b, 0x1e, 0x86, 0x8d, 0x7c,
0x78, 0x6a, 0x37, 0x27, 0xad, 0xbb, 0x9f, 0xcf, 0xf7, 0x58, 0x05, 0xa3, 0x53, 0xe8, 0x68, 0x14,
0xd7, 0x03, 0x07, 0x19, 0x74, 0x97, 0x51, 0xfa, 0xaa, 0x28, 0x35, 0x30, 0x38, 0x86, 0x36, 0x3e,
0x45, 0x29, 0xb4, 0x14, 0x97, 0xa5, 0xdd, 0x2e, 0xc3, 0xba, 0xf9, 0x0d, 0x0e, 0x0e, 0xcb, 0x26,
0x78, 0x09, 0xde, 0x69, 0x29, 0xf8, 0xb7, 0x0e, 0x83, 0x6f, 0x04, 0x0e, 0x70, 0x1e, 0x73, 0xb3,
0x58, 0x89, 0x82, 0x1e, 0x43, 0xcb, 0x06, 0x8c, 0xaa, 0xff, 0x4f, 0x9f, 0x3d, 0xe0, 0x57, 0xb8,
0xf0, 0xf2, 0x36, 0x17, 0x0c, 0xa1, 0xf7, 0x46, 0x9d, 0x3f, 0x19, 0x75, 0x77, 0x8d, 0x8e, 0xa1,
0x65, 0x79, 0xd4, 0x03, 0x27, 0x3a, 0xf7, 0xf7, 0x68, 0x07, 0xdc, 0x59, 0x74, 0xee, 0x13, 0x3b,
0x60, 0x91, 0xef, 0xe0, 0x80, 0x45, 0xbe, 0x1b, 0x7c, 0x86, 0x2e, 0x13, 0x3c, 0x79, 0x93, 0x2a,
0xa3, 0xe9, 0x63, 0xe8, 0x68, 0x23, 0xf2, 0x2b, 0xa9, 0xd1, 0x96, 0xcb, 0x3c, 0xdb, 0xc6, 0xda,
0x2a, 0x7f, 0xda, 0xa8, 0x45, 0xad, 0x6c, 0x6b, 0xfa, 0x04, 0xf6, 0xb5, 0xe1, 0x85, 0xb1, 0x68,
0x17, 0xd1, 0x1d, 0xec, 0x63, 0x4d, 0x1f, 0x81, 0x27, 0x54, 0x62, 0x17, 0x2d, 0x5c, 0xb4, 0x85,
0x4a, 0x62, 0x1d, 0x7c, 0x27, 0xd0, 0x7e, 0xbd, 0xda, 0xa8, 0x2f, 0x74, 0x08, 0x3d, 0x99, 0xaa,
0x2b, 0xfb, 0xff, 0x37, 0x62, 0x5d, 0x99, 0x2a, 0x7b, 0x04, 0xb1, 0xc6, 0x3d, 0xbf, 0xb9, 0xdf,
0x57, 0xe7, 0x22, 0xf9, 0x4d, 0xb5, 0x0f, 0xab, 0xf0, 0x5c, 0x0c, 0xef, 0x68, 0x37, 0x3c, 0x14,
0x08, 0x23, 0xb5, 0xc8, 0x92, 0x54, 0x2d, 0x9b, 0xe4, 0x12, 0x6e, 0x38, 0xda, 0x39, 0x60, 0x58,
0x07, 0x23, 0xd8, 0xaf, 0x51, 0xb4, 0x07, 0x9d, 0x77, 0xb3, 0xb7, 0xb3, 0xb3, 0xf7, 0xb3, 0x32,
0xac, 0x0f, 0x67, 0xcc, 0x27, 0xc1, 0x35, 0xfc, 0x87, 0xaf, 0x89, 0xe4, 0x5f, 0xef, 0x72, 0x02,
0xde, 0xc2, 0xbe, 0x50, 0x9f, 0xe5, 0xe1, 0x03, 0xa7, 0x35, 0xa1, 0x84, 0x9d, 0xf4, 0xef, 0xb6,
0x43, 0xf2, 0x63, 0x3b, 0x24, 0xbf, 0xb6, 0x43, 0xf2, 0xd1, 0xb3, 0xe8, 0x7c, 0x3e, 0xf7, 0xf0,
0x13, 0x7b, 0xf1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x9b, 0x28, 0x1b, 0x93, 0x03, 0x00, 0x00,
}
func (m *Sample) Marshal() (dAtA []byte, err error) {
@ -713,6 +881,110 @@ func (m *ReadHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *Chunk) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Chunk) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Data) > 0 {
i -= len(m.Data)
copy(dAtA[i:], m.Data)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Data)))
i--
dAtA[i] = 0x22
}
if m.Type != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Type))
i--
dAtA[i] = 0x18
}
if m.MaxTimeMs != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.MaxTimeMs))
i--
dAtA[i] = 0x10
}
if m.MinTimeMs != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.MinTimeMs))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *ChunkedSeries) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ChunkedSeries) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ChunkedSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Chunks) > 0 {
for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Labels) > 0 {
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
@ -852,6 +1124,55 @@ func (m *ReadHints) Size() (n int) {
return n
}
func (m *Chunk) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.MinTimeMs != 0 {
n += 1 + sovTypes(uint64(m.MinTimeMs))
}
if m.MaxTimeMs != 0 {
n += 1 + sovTypes(uint64(m.MaxTimeMs))
}
if m.Type != 0 {
n += 1 + sovTypes(uint64(m.Type))
}
l = len(m.Data)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ChunkedSeries) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Labels) > 0 {
for _, e := range m.Labels {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if len(m.Chunks) > 0 {
for _, e := range m.Chunks {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -1550,6 +1871,273 @@ func (m *ReadHints) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *Chunk) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Chunk: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType)
}
m.MinTimeMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.MinTimeMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType)
}
m.MaxTimeMs = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.MaxTimeMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= Chunk_Encoding(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ChunkedSeries) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ChunkedSeries: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ChunkedSeries: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Labels = append(m.Labels, Label{})
if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Chunks = append(m.Chunks, Chunk{})
if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0


@ -23,6 +23,7 @@ message Sample {
int64 timestamp = 2;
}
// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
repeated Label labels = 1 [(gogoproto.nullable) = false];
repeated Sample samples = 2 [(gogoproto.nullable) = false];
@ -56,3 +57,26 @@ message ReadHints {
int64 start_ms = 3; // Start time in milliseconds.
int64 end_ms = 4; // End time in milliseconds.
}
// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
int64 min_time_ms = 1;
int64 max_time_ms = 2;
// We require this to match chunkenc.Encoding.
enum Encoding {
UNKNOWN = 0;
XOR = 1;
}
Encoding type = 3;
bytes data = 4;
}
// ChunkedSeries represents a single, encoded time series.
message ChunkedSeries {
// Labels should be sorted.
repeated Label labels = 1 [(gogoproto.nullable) = false];
// Chunks will be in start time order and may overlap.
repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
}
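
Since Chunk.data carries raw TSDB chunk bytes, a receiver can decode samples with the tsdb/chunkenc package. A sketch under the assumption that this vintage of chunkenc exposes FromData and an argument-less Iterator (the reuse-parameter variant came later):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// decodeChunk prints every (timestamp, value) pair in a received chunk.
func decodeChunk(c prompb.Chunk) error {
	// Chunk_Encoding is required to match chunkenc.Encoding, so a direct cast works.
	chk, err := chunkenc.FromData(chunkenc.Encoding(c.Type), c.Data)
	if err != nil {
		return err
	}
	it := chk.Iterator()
	for it.Next() {
		t, v := it.At()
		fmt.Println(t, v)
	}
	return it.Err()
}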

storage/remote/chunked.go (new file, 154 lines)

@ -0,0 +1,154 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"bufio"
"encoding/binary"
"hash"
"hash/crc32"
"io"
"net/http"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
)
// DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame the client allows.
// The default of 50MB is equivalent to ~100k full XOR chunks with an average labelset.
const DefaultChunkedReadLimit = 5e+7
// The table gets initialized with sync.Once, but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// beforehand.
var castagnoliTable *crc32.Table
func init() {
castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}
// ChunkedWriter is an io.Writer wrapper that allows streaming by adding a uvarint delimiter before each write,
// encoding the length of the corresponding byte array.
type ChunkedWriter struct {
writer io.Writer
flusher http.Flusher
crc32 hash.Hash32
}
// NewChunkedWriter constructs a ChunkedWriter.
func NewChunkedWriter(w io.Writer, f http.Flusher) *ChunkedWriter {
return &ChunkedWriter{writer: w, flusher: f, crc32: crc32.New(castagnoliTable)}
}
// Write writes the given bytes to the stream and flushes it.
// Each frame includes:
//
// 1. uvarint for the size of the data frame.
// 2. big-endian uint32 for the Castagnoli polynomial CRC-32 checksum of the data frame.
// 3. the bytes of the given data.
//
// Write returns the number of sent bytes for the given buffer. The number does not include the delimiter and checksum bytes.
func (w *ChunkedWriter) Write(b []byte) (int, error) {
if len(b) == 0 {
return 0, nil
}
var buf [binary.MaxVarintLen64]byte
v := binary.PutUvarint(buf[:], uint64(len(b)))
if _, err := w.writer.Write(buf[:v]); err != nil {
return 0, err
}
w.crc32.Reset()
if _, err := w.crc32.Write(b); err != nil {
return 0, err
}
if err := binary.Write(w.writer, binary.BigEndian, w.crc32.Sum32()); err != nil {
return 0, err
}
n, err := w.writer.Write(b)
if err != nil {
return n, err
}
w.flusher.Flush()
return n, nil
}
// ChunkedReader is a buffered reader that expects a uvarint delimiter and a checksum before each message.
// It will allocate as much memory as the biggest frame defined by the delimiter (on top of bufio.Reader allocations).
type ChunkedReader struct {
b *bufio.Reader
data []byte
sizeLimit uint64
crc32 hash.Hash32
}
// NewChunkedReader constructs a ChunkedReader.
// It allows passing a data slice for byte-slice reuse, which will be grown to the needed size if it is too small.
func NewChunkedReader(r io.Reader, sizeLimit uint64, data []byte) *ChunkedReader {
return &ChunkedReader{b: bufio.NewReader(r), sizeLimit: sizeLimit, data: data, crc32: crc32.New(castagnoliTable)}
}
// Next returns the next length-delimited record from the input, or io.EOF if
// there are no more records available. Returns io.ErrUnexpectedEOF if a short
// record is found, with a length of n but fewer than n bytes of data.
// Next also verifies the frame against its CRC-32 Castagnoli checksum.
//
// NOTE: The slice returned is valid only until a subsequent call to Next. It is the caller's responsibility to copy the
// returned slice if needed.
func (r *ChunkedReader) Next() ([]byte, error) {
size, err := binary.ReadUvarint(r.b)
if err != nil {
return nil, err
}
if size > r.sizeLimit {
return nil, errors.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size)
}
if cap(r.data) < int(size) {
r.data = make([]byte, size)
} else {
r.data = r.data[:size]
}
var crc32 uint32
if err := binary.Read(r.b, binary.BigEndian, &crc32); err != nil {
return nil, err
}
r.crc32.Reset()
if _, err := io.ReadFull(io.TeeReader(r.b, r.crc32), r.data); err != nil {
return nil, err
}
if r.crc32.Sum32() != crc32 {
return nil, errors.New("chunkedReader: corrupted frame; checksum mismatch")
}
return r.data, nil
}
// NextProto consumes the next available record by calling r.Next, and decodes
// it into the protobuf with proto.Unmarshal.
func (r *ChunkedReader) NextProto(pb proto.Message) error {
rec, err := r.Next()
if err != nil {
return err
}
return proto.Unmarshal(rec, pb)
}
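
On the client side, ChunkedReader pairs with this writer. A minimal consumption loop (a sketch in this package, assuming fmt and prompb are imported and body is an already-open HTTP response body for a STREAMED_XOR_CHUNKS response):

// readStream drains a streamed remote read response frame by frame.
func readStream(body io.Reader) error {
	r := NewChunkedReader(body, DefaultChunkedReadLimit, nil)
	for {
		res := &prompb.ChunkedReadResponse{}
		if err := r.NextProto(res); err == io.EOF {
			return nil // Stream finished cleanly.
		} else if err != nil {
			return err
		}
		for _, series := range res.ChunkedSeries {
			fmt.Println(series.Labels, len(series.Chunks))
		}
	}
}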


@ -0,0 +1,106 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"bytes"
"io"
"testing"
"github.com/prometheus/prometheus/util/testutil"
)
type mockedFlusher struct {
flushed int
}
func (f *mockedFlusher) Flush() {
f.flushed++
}
func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
b := &bytes.Buffer{}
f := &mockedFlusher{}
w := NewChunkedWriter(b, f)
r := NewChunkedReader(b, 20, nil)
msgs := [][]byte{
[]byte("test1"),
[]byte("test2"),
[]byte("test3"),
[]byte("test4"),
[]byte{}, // This is ignored by the writer.
[]byte("test5-after-empty"),
}
for _, msg := range msgs {
n, err := w.Write(msg)
testutil.Ok(t, err)
testutil.Equals(t, len(msg), n)
}
i := 0
for ; i < 4; i++ {
msg, err := r.Next()
testutil.Ok(t, err)
testutil.Assert(t, i < len(msgs), "more messages then expected")
testutil.Equals(t, msgs[i], msg)
}
// Empty byte slice is skipped.
i++
msg, err := r.Next()
testutil.Ok(t, err)
testutil.Assert(t, i < len(msgs), "more messages then expected")
testutil.Equals(t, msgs[i], msg)
_, err = r.Next()
testutil.NotOk(t, err, "expected io.EOF")
testutil.Equals(t, io.EOF, err)
testutil.Equals(t, 5, f.flushed)
}
func TestChunkedReader_Overflow(t *testing.T) {
b := &bytes.Buffer{}
_, err := NewChunkedWriter(b, &mockedFlusher{}).Write([]byte("twelve bytes"))
testutil.Ok(t, err)
b2 := make([]byte, 12)
copy(b2, b.Bytes())
ret, err := NewChunkedReader(b, 12, nil).Next()
testutil.Ok(t, err)
testutil.Equals(t, "twelve bytes", string(ret))
_, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next()
testutil.NotOk(t, err, "expect exceed limit error")
testutil.Equals(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error())
}
func TestChunkedReader_CorruptedFrame(t *testing.T) {
b := &bytes.Buffer{}
w := NewChunkedWriter(b, &mockedFlusher{})
n, err := w.Write([]byte("test1"))
testutil.Ok(t, err)
testutil.Equals(t, 5, n)
bs := b.Bytes()
bs[9] = 1 // Malform the frame by changing one byte.
_, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next()
testutil.NotOk(t, err, "expected malformed frame")
testutil.Equals(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error())
}


@ -24,10 +24,10 @@ import (
"github.com/golang/snappy"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
// decodeReadLimit is the maximum size of a read request body in bytes.
@ -73,9 +73,6 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error
return err
}
w.Header().Set("Content-Type", "application/x-protobuf")
w.Header().Set("Content-Encoding", "snappy")
compressed := snappy.Encode(nil, data)
_, err = w.Write(compressed)
return err
@ -106,25 +103,6 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, p *storage.SelectParams
}, nil
}
// FromQuery unpacks a Query proto.
func FromQuery(req *prompb.Query) (int64, int64, []*labels.Matcher, *storage.SelectParams, error) {
matchers, err := fromLabelMatchers(req.Matchers)
if err != nil {
return 0, 0, nil, nil, err
}
var selectParams *storage.SelectParams
if req.Hints != nil {
selectParams = &storage.SelectParams{
Start: req.Hints.StartMs,
End: req.Hints.EndMs,
Step: req.Hints.StepMs,
Func: req.Hints.Func,
}
}
return req.StartTimestampMs, req.EndTimestampMs, matchers, selectParams, nil
}
// ToQueryResult builds a QueryResult proto.
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, error) {
numSamples := 0
@ -183,6 +161,181 @@ func FromQueryResult(res *prompb.QueryResult) storage.SeriesSet {
}
}
// NegotiateResponseType returns the first accepted response type that this server supports.
// On an empty accepted list we assume that the SAMPLES response type was requested, to maintain backward compatibility.
func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.ReadRequest_ResponseType, error) {
if len(accepted) == 0 {
accepted = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES}
}
supported := map[prompb.ReadRequest_ResponseType]struct{}{
prompb.ReadRequest_SAMPLES: {},
prompb.ReadRequest_STREAMED_XOR_CHUNKS: {},
}
for _, resType := range accepted {
if _, ok := supported[resType]; ok {
return resType, nil
}
}
return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported)
}
// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller.
// TODO(bwplotka): Encode only what's needed. Fetch the encoded series from blocks instead of re-encoding everything.
func StreamChunkedReadResponses(
stream io.Writer,
queryIndex int64,
ss storage.SeriesSet,
sortedExternalLabels []prompb.Label,
maxBytesInFrame int,
) error {
var (
chks []prompb.Chunk
err error
lblsSize int
)
for ss.Next() {
series := ss.At()
iter := series.Iterator()
lbls := MergeLabels(labelsToLabelsProto(series.Labels()), sortedExternalLabels)
lblsSize = 0
for _, lbl := range lbls {
lblsSize += lbl.Size()
}
// Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame.
for {
// TODO(bwplotka): Use ChunkIterator once available in TSDB instead of re-encoding: https://github.com/prometheus/prometheus/pull/5882
chks, err = encodeChunks(iter, chks, maxBytesInFrame-lblsSize)
if err != nil {
return err
}
if len(chks) == 0 {
break
}
b, err := proto.Marshal(&prompb.ChunkedReadResponse{
ChunkedSeries: []*prompb.ChunkedSeries{
{
Labels: lbls,
Chunks: chks,
},
},
QueryIndex: queryIndex,
})
if err != nil {
return errors.Wrap(err, "marshal ChunkedReadResponse")
}
if _, err := stream.Write(b); err != nil {
return errors.Wrap(err, "write to stream")
}
chks = chks[:0]
}
if err := iter.Err(); err != nil {
return err
}
}
if err := ss.Err(); err != nil {
return err
}
return nil
}
// encodeChunks expects the iterator to be ready to use; it consumes samples via iter.Next() and resumes where the
// previous call left off.
func encodeChunks(iter storage.SeriesIterator, chks []prompb.Chunk, frameBytesLeft int) ([]prompb.Chunk, error) {
const maxSamplesInChunk = 120
var (
chkMint int64
chkMaxt int64
chk *chunkenc.XORChunk
app chunkenc.Appender
err error
)
for iter.Next() {
if chk == nil {
chk = chunkenc.NewXORChunk()
app, err = chk.Appender()
if err != nil {
return nil, err
}
chkMint, _ = iter.At()
}
app.Append(iter.At())
chkMaxt, _ = iter.At()
if chk.NumSamples() < maxSamplesInChunk {
continue
}
// Cut the chunk.
chks = append(chks, prompb.Chunk{
MinTimeMs: chkMint,
MaxTimeMs: chkMaxt,
Type: prompb.Chunk_Encoding(chk.Encoding()),
Data: chk.Bytes(),
})
chk = nil
frameBytesLeft -= chks[len(chks)-1].Size()
// We are fine with a minor inaccuracy in max bytes per frame; the overshoot is at most one full chunk.
if frameBytesLeft <= 0 {
break
}
}
if iter.Err() != nil {
return nil, errors.Wrap(iter.Err(), "iter TSDB series")
}
if chk != nil {
// Cut the leftover chunk if it exists.
chks = append(chks, prompb.Chunk{
MinTimeMs: chkMint,
MaxTimeMs: chkMaxt,
Type: prompb.Chunk_Encoding(chk.Encoding()),
Data: chk.Bytes(),
})
}
return chks, nil
}
// MergeLabels merges two sets of sorted proto labels, preferring those in
// primary to those in secondary when there is an overlap.
func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
result := make([]prompb.Label, 0, len(primary)+len(secondary))
i, j := 0, 0
for i < len(primary) && j < len(secondary) {
if primary[i].Name < secondary[j].Name {
result = append(result, primary[i])
i++
} else if primary[i].Name > secondary[j].Name {
result = append(result, secondary[j])
j++
} else {
result = append(result, primary[i])
i++
j++
}
}
for ; i < len(primary); i++ {
result = append(result, primary[i])
}
for ; j < len(secondary); j++ {
result = append(result, secondary[j])
}
return result
}
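// For illustration, a hypothetical call with made-up label values (both inputs
// sorted by name, as required):
//
//	primary := []prompb.Label{{Name: "__name__", Value: "up"}, {Name: "job", Value: "api"}}
//	secondary := []prompb.Label{{Name: "job", Value: "external"}, {Name: "region", Value: "eu"}}
//	merged := MergeLabels(primary, secondary)
//
// merged is [__name__="up", job="api", region="eu"]; on the "job" overlap, primary wins.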
type byLabel []storage.Series
func (a byLabel) Len() int { return len(a) }
@ -322,7 +475,8 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error)
return pbMatchers, nil
}
func fromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers.
func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) {
result := make([]*labels.Matcher, 0, len(matchers))
for _, matcher := range matchers {
var mtype labels.MatchType


@ -212,3 +212,27 @@ func TestFromQueryResultWithDuplicates(t *testing.T) {
errMessage := errSeries.Err().Error()
testutil.Assert(t, errMessage == "duplicate label with name: foo", fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
}
func TestNegotiateResponseType(t *testing.T) {
r, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{
prompb.ReadRequest_STREAMED_XOR_CHUNKS,
prompb.ReadRequest_SAMPLES,
})
testutil.Ok(t, err)
testutil.Equals(t, prompb.ReadRequest_STREAMED_XOR_CHUNKS, r)
r2, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{
prompb.ReadRequest_SAMPLES,
prompb.ReadRequest_STREAMED_XOR_CHUNKS,
})
testutil.Ok(t, err)
testutil.Equals(t, prompb.ReadRequest_SAMPLES, r2)
r3, err := NegotiateResponseType([]prompb.ReadRequest_ResponseType{})
testutil.Ok(t, err)
testutil.Equals(t, prompb.ReadRequest_SAMPLES, r3)
_, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20})
testutil.NotOk(t, err, "expected error due to not supported requested response types")
testutil.Equals(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error())
}


@ -151,6 +151,7 @@ type API struct {
enableAdmin bool
logger log.Logger
remoteReadSampleLimit int
remoteReadMaxBytesInFrame int
remoteReadGate *gate.Gate
CORSOrigin *regexp.Regexp
}
@ -175,6 +176,7 @@ func NewAPI(
rr rulesRetriever,
remoteReadSampleLimit int,
remoteReadConcurrencyLimit int,
remoteReadMaxBytesInFrame int,
CORSOrigin *regexp.Regexp,
) *API {
return &API{
@ -192,6 +194,7 @@ func NewAPI(
rulesRetriever: rr,
remoteReadSampleLimit: remoteReadSampleLimit,
remoteReadGate: gate.New(remoteReadConcurrencyLimit),
remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame,
logger: logger,
CORSOrigin: CORSOrigin,
}
@ -841,7 +844,8 @@ func (api *API) serveFlags(r *http.Request) apiFuncResult {
}
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
if err := api.remoteReadGate.Start(r.Context()); err != nil {
ctx := r.Context()
if err := api.remoteReadGate.Start(ctx); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@ -856,58 +860,8 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
return
}
resp := prompb.ReadResponse{
Results: make([]*prompb.QueryResult, len(req.Queries)),
}
for i, query := range req.Queries {
from, through, matchers, selectParams, err := remote.FromQuery(query)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
querier, err := api.Queryable.Querier(r.Context(), from, through)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer querier.Close()
// Change equality matchers which match external labels
// to a matcher that looks for an empty label,
// as that label should not be present in the storage.
externalLabels := api.config().GlobalConfig.ExternalLabels.Map()
filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
for _, m := range matchers {
value := externalLabels[m.Name]
if m.Type == labels.MatchEqual && value == m.Value {
matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
filteredMatchers = append(filteredMatchers, matcher)
} else {
filteredMatchers = append(filteredMatchers, m)
}
}
set, _, err := querier.Select(selectParams, filteredMatchers...)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit)
if err != nil {
if httpErr, ok := err.(remote.HTTPError); ok {
http.Error(w, httpErr.Error(), httpErr.Status())
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Add external labels back in, in sorted order.
sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels))
for name, value := range externalLabels {
sortedExternalLabels = append(sortedExternalLabels, prompb.Label{
@ -919,8 +873,69 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
})
responseType, err := remote.NegotiateResponseType(req.AcceptedResponseTypes)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
switch responseType {
case prompb.ReadRequest_STREAMED_XOR_CHUNKS:
w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
f, ok := w.(http.Flusher)
if !ok {
http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError)
return
}
for i, query := range req.Queries {
err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {
return remote.StreamChunkedReadResponses(
remote.NewChunkedWriter(w, f),
int64(i),
set,
sortedExternalLabels,
api.remoteReadMaxBytesInFrame,
)
})
if err != nil {
if httpErr, ok := err.(remote.HTTPError); ok {
http.Error(w, httpErr.Error(), httpErr.Status())
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
default:
w.Header().Set("Content-Type", "application/x-protobuf")
w.Header().Set("Content-Encoding", "snappy")
// On empty or unknown types in req.AcceptedResponseTypes we default to the non-streamed, raw-samples response.
resp := prompb.ReadResponse{
Results: make([]*prompb.QueryResult, len(req.Queries)),
}
for i, query := range req.Queries {
err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {
resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit)
if err != nil {
return err
}
for _, ts := range resp.Results[i].Timeseries {
ts.Labels = mergeLabels(ts.Labels, sortedExternalLabels)
ts.Labels = remote.MergeLabels(ts.Labels, sortedExternalLabels)
}
return nil
})
if err != nil {
if httpErr, ok := err.(remote.HTTPError); ok {
http.Error(w, httpErr.Error(), httpErr.Status())
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
@ -928,6 +943,67 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
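Each frame written by remote.NewChunkedWriter above follows the format described in the protobuf comments: a varint payload size, a fixed-size big-endian uint32 CRC32 (Castagnoli) checksum, then the payload. A rough sketch of a single frame write (hypothetical helper; the real writer also has to handle partial writes):

var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)

// writeFrame emits one frame: uvarint length, big-endian CRC32-Castagnoli
// of the payload, then the payload, and flushes so the client sees it immediately.
func writeFrame(w io.Writer, f http.Flusher, payload []byte) error {
	var size [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(size[:], uint64(len(payload)))
	if _, err := w.Write(size[:n]); err != nil {
		return err
	}
	var crc [4]byte
	binary.BigEndian.PutUint32(crc[:], crc32.Checksum(payload, castagnoliTable))
	if _, err := w.Write(crc[:]); err != nil {
		return err
	}
	if _, err := w.Write(payload); err != nil {
		return err
	}
	f.Flush()
	return nil
}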
// filterExtLabelsFromMatchers changes equality matchers that match external labels
// to matchers that look for an empty label,
// as external labels should not be present in the storage.
func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) {
matchers, err := remote.FromLabelMatchers(pbMatchers)
if err != nil {
return nil, err
}
filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
for _, m := range matchers {
value := externalLabels[m.Name]
if m.Type == labels.MatchEqual && value == m.Value {
matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
if err != nil {
return nil, err
}
filteredMatchers = append(filteredMatchers, matcher)
} else {
filteredMatchers = append(filteredMatchers, m)
}
}
return filteredMatchers, nil
}
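For example (a sketch with made-up values): given the external label region="eu", an incoming equality matcher region="eu" is relaxed to region="", so it still selects local series, which never carry that label in storage; other matchers pass through unchanged.

filtered, err := filterExtLabelsFromMatchers(
	[]*prompb.LabelMatcher{
		{Type: prompb.LabelMatcher_EQ, Name: "region", Value: "eu"},
		{Type: prompb.LabelMatcher_EQ, Name: "job", Value: "app"},
	},
	map[string]string{"region": "eu"},
)
// filtered now contains {region=""} and {job="app"}.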
func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, externalLabels map[string]string, seriesHandleFn func(set storage.SeriesSet) error) error {
filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
if err != nil {
return err
}
querier, err := api.Queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return err
}
var selectParams *storage.SelectParams
if query.Hints != nil {
selectParams = &storage.SelectParams{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
}
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error())
}
}()
set, _, err := querier.Select(selectParams, filteredMatchers...)
if err != nil {
return err
}
return seriesHandleFn(set)
}
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
@ -1067,33 +1143,6 @@ func convertMatcher(m *labels.Matcher) tsdbLabels.Matcher {
panic("storage.convertMatcher: invalid matcher type")
}
// mergeLabels merges two sets of sorted proto labels, preferring those in
// primary to those in secondary when there is an overlap.
func mergeLabels(primary, secondary []prompb.Label) []prompb.Label {
result := make([]prompb.Label, 0, len(primary)+len(secondary))
i, j := 0, 0
for i < len(primary) && j < len(secondary) {
if primary[i].Name < secondary[j].Name {
result = append(result, primary[i])
i++
} else if primary[i].Name > secondary[j].Name {
result = append(result, secondary[j])
j++
} else {
result = append(result, primary[i])
i++
j++
}
}
for ; i < len(primary); i++ {
result = append(result, primary[i])
}
for ; j < len(secondary); j++ {
result = append(result, secondary[j])
}
return result
}
func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) {
statusMessage := statusSuccess
var warningStrings []string

View file

@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
@ -381,13 +382,23 @@ func setupRemote(s storage.Storage) *httptest.Server {
Results: make([]*prompb.QueryResult, len(req.Queries)),
}
for i, query := range req.Queries {
from, through, matchers, selectParams, err := remote.FromQuery(query)
matchers, err := remote.FromLabelMatchers(query.Matchers)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
querier, err := s.Querier(r.Context(), from, through)
var selectParams *storage.SelectParams
if query.Hints != nil {
selectParams = &storage.SelectParams{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
}
}
querier, err := s.Querier(r.Context(), query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -916,19 +927,17 @@ func assertAPIResponse(t *testing.T, got interface{}, exp interface{}) {
}
}
func TestReadEndpoint(t *testing.T) {
func TestSampledReadEndpoint(t *testing.T) {
suite, err := promql.NewTest(t, `
load 1m
test_metric1{foo="bar",baz="qux"} 1
`)
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
defer suite.Close()
if err := suite.Run(); err != nil {
t.Fatal(err)
}
err = suite.Run()
testutil.Ok(t, err)
api := &API{
Queryable: suite.Storage(),
@ -937,6 +946,7 @@ func TestReadEndpoint(t *testing.T) {
return config.Config{
GlobalConfig: config.GlobalConfig{
ExternalLabels: labels.Labels{
// We expect external labels to be added, with the source labels honored.
{Name: "baz", Value: "a"},
{Name: "b", Value: "c"},
{Name: "d", Value: "e"},
@ -950,27 +960,22 @@ func TestReadEndpoint(t *testing.T) {
// Encode the request.
matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1")
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e")
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
query, err := remote.ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectParams{Step: 0, Func: "avg"})
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
req := &prompb.ReadRequest{Queries: []*prompb.Query{query}}
data, err := proto.Marshal(req)
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
compressed := snappy.Encode(nil, data)
request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
recorder := httptest.NewRecorder()
api.remoteRead(recorder, request)
@ -978,28 +983,25 @@ func TestReadEndpoint(t *testing.T) {
t.Fatal(recorder.Code)
}
testutil.Equals(t, "application/x-protobuf", recorder.Result().Header.Get("Content-Type"))
testutil.Equals(t, "snappy", recorder.Result().Header.Get("Content-Encoding"))
// Decode the response.
compressed, err = ioutil.ReadAll(recorder.Result().Body)
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
uncompressed, err := snappy.Decode(nil, compressed)
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
var resp prompb.ReadResponse
err = proto.Unmarshal(uncompressed, &resp)
if err != nil {
t.Fatal(err)
}
testutil.Ok(t, err)
if len(resp.Results) != 1 {
t.Fatalf("Expected 1 result, got %d", len(resp.Results))
}
result := resp.Results[0]
expected := &prompb.QueryResult{
testutil.Equals(t, &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
{
Labels: []prompb.Label{
@ -1012,10 +1014,188 @@ func TestReadEndpoint(t *testing.T) {
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
},
},
}, resp.Results[0])
}
func TestStreamReadEndpoint(t *testing.T) {
// Three series: the first has 120 samples, so we expect 1 frame with 1 chunk (TSDB cuts chunks at 120 samples).
// The second has 121 samples, so we expect 1 frame with 2 chunks.
// The third has 241 samples, so we expect 1 frame with 2 chunks and 1 frame with 1 chunk for the same series, due to the frame bytes limit.
suite, err := promql.NewTest(t, `
load 1m
test_metric1{foo="bar1",baz="qux"} 0+100x119
test_metric1{foo="bar2",baz="qux"} 0+100x120
test_metric1{foo="bar3",baz="qux"} 0+100x240
`)
testutil.Ok(t, err)
defer suite.Close()
testutil.Ok(t, suite.Run())
api := &API{
Queryable: suite.Storage(),
QueryEngine: suite.QueryEngine(),
config: func() config.Config {
return config.Config{
GlobalConfig: config.GlobalConfig{
ExternalLabels: labels.Labels{
// We expect external labels to be added, with the source labels honored.
{Name: "baz", Value: "a"},
{Name: "b", Value: "c"},
{Name: "d", Value: "e"},
},
},
}
},
remoteReadSampleLimit: 1e6,
remoteReadGate: gate.New(1),
// The label set takes 57 bytes and a full chunk in the test data takes roughly 240 bytes, so this budget allows at most 2 chunks per frame in this test.
remoteReadMaxBytesInFrame: 57 + 480,
}
// Encode the request.
matcher1, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_metric1")
testutil.Ok(t, err)
matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e")
testutil.Ok(t, err)
query, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectParams{Step: 0, Func: "avg"})
testutil.Ok(t, err)
req := &prompb.ReadRequest{
Queries: []*prompb.Query{query},
AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS},
}
data, err := proto.Marshal(req)
testutil.Ok(t, err)
compressed := snappy.Encode(nil, data)
request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
testutil.Ok(t, err)
recorder := httptest.NewRecorder()
api.remoteRead(recorder, request)
if recorder.Code/100 != 2 {
t.Fatal(recorder.Code)
}
testutil.Equals(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
testutil.Equals(t, "", recorder.Result().Header.Get("Content-Encoding"))
var results []*prompb.ChunkedReadResponse
stream := remote.NewChunkedReader(recorder.Result().Body, remote.DefaultChunkedReadLimit, nil)
for {
res := &prompb.ChunkedReadResponse{}
err := stream.NextProto(res)
if err == io.EOF {
break
}
testutil.Ok(t, err)
results = append(results, res)
}
if len(results) != 4 {
t.Fatalf("Expected 4 result, got %d", len(results))
}
testutil.Equals(t, []*prompb.ChunkedReadResponse{
{
ChunkedSeries: []*prompb.ChunkedSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar1"},
},
Chunks: []prompb.Chunk{
{
Type: prompb.Chunk_XOR,
MaxTimeMs: 7140000,
Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"),
},
},
},
},
},
{
ChunkedSeries: []*prompb.ChunkedSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar2"},
},
Chunks: []prompb.Chunk{
{
Type: prompb.Chunk_XOR,
MaxTimeMs: 7140000,
Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"),
},
{
Type: prompb.Chunk_XOR,
MinTimeMs: 7200000,
MaxTimeMs: 7200000,
Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"),
},
},
},
},
},
{
ChunkedSeries: []*prompb.ChunkedSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar3"},
},
Chunks: []prompb.Chunk{
{
Type: prompb.Chunk_XOR,
MaxTimeMs: 7140000,
Data: []byte("\000x\000\000\000\000\000\000\000\000\000\340\324\003\302|\005\224\000\301\254}\351z2\320O\355\264n[\007\316\224\243md\371\320\375\032Pm\nS\235\016Q\255\006P\275\250\277\312\201Z\003(3\240R\207\332\005(\017\240\322\201\332=(\023\2402\203Z\007(w\2402\201Z\017(\023\265\227\364P\033@\245\007\364\nP\033C\245\002t\036P+@e\036\364\016Pk@e\002t:P;A\245\001\364\nS\373@\245\006t\006P+C\345\002\364\006Pk@\345\036t\nP\033A\245\003\364:P\033@\245\006t\016ZJ\377\\\205\313\210\327\270\017\345+F[\310\347E)\355\024\241\366\342}(v\215(N\203)\326\207(\336\203(V\332W\362\202t4\240m\005(\377AJ\006\320\322\202t\374\240\255\003(oA\312:\3202"),
},
{
Type: prompb.Chunk_XOR,
MinTimeMs: 7200000,
MaxTimeMs: 14340000,
Data: []byte("\000x\200\364\356\006@\307p\000\000\000\000\000\340\324\003\340>\224\355\260\277\322\200\372\005(=\240R\207:\003(\025\240\362\201z\003(\365\240r\203:\005(\r\241\322\201\372\r(\r\240R\237:\007(5\2402\201z\037(\025\2402\203:\005(\375\240R\200\372\r(\035\241\322\201:\003(5\240r\326g\364\271\213\227!\253q\037\312N\340GJ\033E)\375\024\241\266\362}(N\217(V\203)\336\207(\326\203(N\334W\322\203\2644\240}\005(\373AJ\031\3202\202\264\374\240\275\003(kA\3129\320R\201\2644\240\375\264\277\322\200\332\005(3\240r\207Z\003(\027\240\362\201Z\003(\363\240R\203\332\005(\017\241\322\201\332\r(\023\2402\237Z\007(7\2402\201Z\037(\023\240\322\200\332\005(\377\240R\200\332\r "),
},
},
},
},
},
{
ChunkedSeries: []*prompb.ChunkedSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar3"},
},
Chunks: []prompb.Chunk{
{
Type: prompb.Chunk_XOR,
MinTimeMs: 14400000,
MaxTimeMs: 14400000,
Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"),
},
},
},
},
},
}, results)
}
type fakeDB struct {

View file

@ -228,6 +228,7 @@ type Options struct {
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
@ -291,6 +292,7 @@ func New(logger log.Logger, o *Options) *Handler {
h.ruleManager,
h.options.RemoteReadSampleLimit,
h.options.RemoteReadConcurrencyLimit,
h.options.RemoteReadBytesInFrame,
h.options.CORSOrigin,
)