*: add admin grpc API
This commit is contained in:
parent dc15a6f6ea
commit ccf9e62972
@@ -225,13 +225,13 @@ func main() {
 	cfg.queryEngine.Logger = logger
 	var (
 		notifier = notifier.New(&cfg.notifier, logger)
-		targetManager = retrieval.NewTargetManager(localStorage, logger)
-		queryEngine = promql.NewEngine(localStorage, &cfg.queryEngine)
+		targetManager = retrieval.NewTargetManager(tsdb.Adapter(localStorage), logger)
+		queryEngine = promql.NewEngine(tsdb.Adapter(localStorage), &cfg.queryEngine)
 		ctx, cancelCtx = context.WithCancel(context.Background())
 	)

 	ruleManager := rules.NewManager(&rules.ManagerOptions{
-		Appendable: localStorage,
+		Appendable: tsdb.Adapter(localStorage),
 		Notifier: notifier,
 		QueryEngine: queryEngine,
 		Context: ctx,
@@ -318,7 +318,8 @@ func main() {
 	// to be canceled and ensures a quick shutdown of the rule manager.
 	defer cancelCtx()

-	go webHandler.Run()
+	errc := make(chan error)
+	go func() { errc <- webHandler.Run(ctx) }()

 	// Wait for reload or termination signals.
 	close(hupReady) // Unblock SIGHUP handler.
@@ -330,7 +331,7 @@ func main() {
 		logger.Warn("Received SIGTERM, exiting gracefully...")
 	case <-webHandler.Quit():
 		logger.Warn("Received termination request via web service, exiting gracefully...")
-	case err := <-webHandler.ListenError():
+	case err := <-errc:
 		logger.Errorln("Error starting web server, exiting gracefully:", err)
 	}

new file: documentation/dev/api/swagger.json (123 lines)
@@ -0,0 +1,123 @@
{
  "swagger": "2.0",
  "info": {
    "title": "prompb/rpc.proto",
    "version": "version not set"
  },
  "schemes": [
    "http",
    "https"
  ],
  "consumes": [
    "application/json"
  ],
  "produces": [
    "application/json"
  ],
  "paths": {
    "/admin/v1/tsdb/delete_series": {
      "post": {
        "summary": "DeleteSeries deletes data for a selection of series in a time range.",
        "operationId": "DeleteSeries",
        "responses": {
          "200": {
            "description": "",
            "schema": {
              "$ref": "#/definitions/prometheusSeriesDeleteResponse"
            }
          }
        },
        "parameters": [
          {
            "name": "body",
            "in": "body",
            "required": true,
            "schema": {
              "$ref": "#/definitions/prometheusSeriesDeleteRequest"
            }
          }
        ],
        "tags": [
          "Admin"
        ]
      }
    },
    "/admin/v1/tsdb/snapshot": {
      "post": {
        "summary": "Snapshot creates a snapshot of all current data into 'snapshots/\u003cdatetime\u003e-\u003crand\u003e' under\nthe TSDB's date directory.",
        "operationId": "TSDBSnapshot",
        "responses": {
          "200": {
            "description": "",
            "schema": {
              "$ref": "#/definitions/prometheusTSDBSnapshotResponse"
            }
          }
        },
        "tags": [
          "Admin"
        ]
      }
    }
  },
  "definitions": {
    "prometheusLabelMatcher": {
      "type": "object",
      "properties": {
        "type": {
          "$ref": "#/definitions/prometheusLabelMatcherType"
        },
        "name": {
          "type": "string"
        },
        "value": {
          "type": "string"
        }
      },
      "description": "Matcher specifies a rule, which can match or set of labels or not."
    },
    "prometheusLabelMatcherType": {
      "type": "string",
      "enum": [
        "EQ",
        "NEQ",
        "RE",
        "NRE"
      ],
      "default": "EQ"
    },
    "prometheusSeriesDeleteRequest": {
      "type": "object",
      "properties": {
        "min_time": {
          "type": "string",
          "format": "date-time"
        },
        "max_time": {
          "type": "string",
          "format": "date-time"
        },
        "matchers": {
          "type": "array",
          "items": {
            "$ref": "#/definitions/prometheusLabelMatcher"
          }
        }
      }
    },
    "prometheusSeriesDeleteResponse": {
      "type": "object"
    },
    "prometheusTSDBSnapshotRequest": {
      "type": "object"
    },
    "prometheusTSDBSnapshotResponse": {
      "type": "object",
      "properties": {
        "name": {
          "type": "string"
        }
      }
    }
  }
}
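For illustration only (not part of the commit): the gateway serves these operations as plain JSON over HTTP, so a DeleteSeries call can be issued with an ordinary HTTP client. The sketch below assumes a Prometheus build exposing the gateway at localhost:9090 and mounting it at the raw paths from this swagger file; the address, the mount point, and the example matcher are assumptions, while the path and body shape come from the definitions above.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Body shape follows prometheusSeriesDeleteRequest: RFC 3339 timestamps and
	// a list of prometheusLabelMatcher objects ({type, name, value}).
	body := []byte(`{
		"min_time": "2017-01-01T00:00:00Z",
		"max_time": "2017-01-02T00:00:00Z",
		"matchers": [{"type": "EQ", "name": "job", "value": "node"}]
	}`)

	// Assumed address and mount point; only the /admin/v1/tsdb/delete_series
	// path itself is taken from the swagger definition above.
	resp, err := http.Post("http://localhost:9090/admin/v1/tsdb/delete_series",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}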
new file: prompb/rpc.pb.go (860 lines)
@@ -0,0 +1,860 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: rpc.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package prompb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
rpc.proto
|
||||
types.proto
|
||||
|
||||
It has these top-level messages:
|
||||
TSDBSnapshotRequest
|
||||
TSDBSnapshotResponse
|
||||
SeriesDeleteRequest
|
||||
SeriesDeleteResponse
|
||||
Sample
|
||||
TimeSeries
|
||||
Label
|
||||
Labels
|
||||
LabelMatcher
|
||||
*/
|
||||
package prompb
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
|
||||
import time "time"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
var _ = time.Kitchen
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type TSDBSnapshotRequest struct {
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotRequest) Reset() { *m = TSDBSnapshotRequest{} }
|
||||
func (m *TSDBSnapshotRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*TSDBSnapshotRequest) ProtoMessage() {}
|
||||
func (*TSDBSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
|
||||
|
||||
type TSDBSnapshotResponse struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotResponse) Reset() { *m = TSDBSnapshotResponse{} }
|
||||
func (m *TSDBSnapshotResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*TSDBSnapshotResponse) ProtoMessage() {}
|
||||
func (*TSDBSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
|
||||
|
||||
type SeriesDeleteRequest struct {
|
||||
MinTime *time.Time `protobuf:"bytes,1,opt,name=min_time,json=minTime,stdtime" json:"min_time,omitempty"`
|
||||
MaxTime *time.Time `protobuf:"bytes,2,opt,name=max_time,json=maxTime,stdtime" json:"max_time,omitempty"`
|
||||
Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"`
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteRequest) Reset() { *m = SeriesDeleteRequest{} }
|
||||
func (m *SeriesDeleteRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SeriesDeleteRequest) ProtoMessage() {}
|
||||
func (*SeriesDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
|
||||
|
||||
type SeriesDeleteResponse struct {
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteResponse) Reset() { *m = SeriesDeleteResponse{} }
|
||||
func (m *SeriesDeleteResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SeriesDeleteResponse) ProtoMessage() {}
|
||||
func (*SeriesDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TSDBSnapshotRequest)(nil), "prometheus.TSDBSnapshotRequest")
|
||||
proto.RegisterType((*TSDBSnapshotResponse)(nil), "prometheus.TSDBSnapshotResponse")
|
||||
proto.RegisterType((*SeriesDeleteRequest)(nil), "prometheus.SeriesDeleteRequest")
|
||||
proto.RegisterType((*SeriesDeleteResponse)(nil), "prometheus.SeriesDeleteResponse")
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for Admin service
|
||||
|
||||
type AdminClient interface {
|
||||
// Snapshot creates a snapshot of all current data into 'snapshots/<datetime>-<rand>' under
|
||||
// the TSDB's date directory.
|
||||
TSDBSnapshot(ctx context.Context, in *TSDBSnapshotRequest, opts ...grpc.CallOption) (*TSDBSnapshotResponse, error)
|
||||
// DeleteSeries deletes data for a selection of series in a time range.
|
||||
DeleteSeries(ctx context.Context, in *SeriesDeleteRequest, opts ...grpc.CallOption) (*SeriesDeleteResponse, error)
|
||||
}
|
||||
|
||||
type adminClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewAdminClient(cc *grpc.ClientConn) AdminClient {
|
||||
return &adminClient{cc}
|
||||
}
|
||||
|
||||
func (c *adminClient) TSDBSnapshot(ctx context.Context, in *TSDBSnapshotRequest, opts ...grpc.CallOption) (*TSDBSnapshotResponse, error) {
|
||||
out := new(TSDBSnapshotResponse)
|
||||
err := grpc.Invoke(ctx, "/prometheus.Admin/TSDBSnapshot", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *adminClient) DeleteSeries(ctx context.Context, in *SeriesDeleteRequest, opts ...grpc.CallOption) (*SeriesDeleteResponse, error) {
|
||||
out := new(SeriesDeleteResponse)
|
||||
err := grpc.Invoke(ctx, "/prometheus.Admin/DeleteSeries", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for Admin service
|
||||
|
||||
type AdminServer interface {
|
||||
// Snapshot creates a snapshot of all current data into 'snapshots/<datetime>-<rand>' under
|
||||
// the TSDB's date directory.
|
||||
TSDBSnapshot(context.Context, *TSDBSnapshotRequest) (*TSDBSnapshotResponse, error)
|
||||
// DeleteSeries deletes data for a selection of series in a time range.
|
||||
DeleteSeries(context.Context, *SeriesDeleteRequest) (*SeriesDeleteResponse, error)
|
||||
}
|
||||
|
||||
func RegisterAdminServer(s *grpc.Server, srv AdminServer) {
|
||||
s.RegisterService(&_Admin_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Admin_TSDBSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TSDBSnapshotRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AdminServer).TSDBSnapshot(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/prometheus.Admin/TSDBSnapshot",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AdminServer).TSDBSnapshot(ctx, req.(*TSDBSnapshotRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Admin_DeleteSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SeriesDeleteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(AdminServer).DeleteSeries(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/prometheus.Admin/DeleteSeries",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(AdminServer).DeleteSeries(ctx, req.(*SeriesDeleteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Admin_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "prometheus.Admin",
|
||||
HandlerType: (*AdminServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "TSDBSnapshot",
|
||||
Handler: _Admin_TSDBSnapshot_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteSeries",
|
||||
Handler: _Admin_DeleteSeries_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "rpc.proto",
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotRequest) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteRequest) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.MinTime != nil {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRpc(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.MinTime)))
|
||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.MinTime, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
}
|
||||
if m.MaxTime != nil {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintRpc(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.MaxTime)))
|
||||
n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.MaxTime, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
if len(m.Matchers) > 0 {
|
||||
for _, msg := range m.Matchers {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Rpc(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Rpc(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *TSDBSnapshotRequest) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *TSDBSnapshotResponse) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteRequest) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.MinTime != nil {
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.MinTime)
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
if m.MaxTime != nil {
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.MaxTime)
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
if len(m.Matchers) > 0 {
|
||||
for _, e := range m.Matchers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *SeriesDeleteResponse) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func sovRpc(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozRpc(x uint64) (n int) {
|
||||
return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *TSDBSnapshotRequest) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TSDBSnapshotRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TSDBSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *TSDBSnapshotResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TSDBSnapshotResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TSDBSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *SeriesDeleteRequest) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: SeriesDeleteRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: SeriesDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.MinTime == nil {
|
||||
m.MinTime = new(time.Time)
|
||||
}
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.MinTime, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.MaxTime == nil {
|
||||
m.MaxTime = new(time.Time)
|
||||
}
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.MaxTime, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Matchers = append(m.Matchers, LabelMatcher{})
|
||||
if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *SeriesDeleteResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: SeriesDeleteResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: SeriesDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRpc(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthRpc
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipRpc(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthRpc
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipRpc(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) }
|
||||
|
||||
var fileDescriptorRpc = []byte{
|
||||
// 393 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x51, 0xcd, 0xae, 0xd2, 0x40,
|
||||
0x14, 0xbe, 0x73, 0x41, 0x94, 0x81, 0x55, 0x41, 0xad, 0x0d, 0x69, 0x71, 0x36, 0x12, 0x16, 0x9d,
|
||||
0x88, 0x3b, 0x5c, 0xd9, 0xb0, 0xd4, 0x4d, 0x61, 0xe5, 0x86, 0x4c, 0xe1, 0x58, 0x9a, 0xd0, 0x99,
|
||||
0xb1, 0x33, 0x18, 0x74, 0xe9, 0x13, 0x98, 0xf8, 0x52, 0x24, 0x6e, 0x4c, 0xdc, 0xfb, 0x43, 0x7c,
|
||||
0x07, 0xb7, 0xa6, 0x33, 0x54, 0xa9, 0xc1, 0xe4, 0xee, 0x4e, 0x4f, 0xbf, 0xef, 0x7c, 0x3f, 0x83,
|
||||
0xdb, 0x85, 0x5c, 0x85, 0xb2, 0x10, 0x5a, 0x38, 0x58, 0x16, 0x22, 0x07, 0xbd, 0x81, 0x9d, 0xf2,
|
||||
0x3a, 0xfa, 0xad, 0x04, 0x65, 0x7f, 0x78, 0x41, 0x2a, 0x44, 0xba, 0x05, 0x6a, 0xbe, 0x92, 0xdd,
|
||||
0x2b, 0xaa, 0xb3, 0x1c, 0x94, 0x66, 0xb9, 0x3c, 0x01, 0x06, 0x27, 0x00, 0x93, 0x19, 0x65, 0x9c,
|
||||
0x0b, 0xcd, 0x74, 0x26, 0x78, 0x45, 0xef, 0xa7, 0x22, 0x15, 0x66, 0xa4, 0xe5, 0x64, 0xb7, 0xe4,
|
||||
0x2e, 0xee, 0x2d, 0xe6, 0xb3, 0x68, 0xce, 0x99, 0x54, 0x1b, 0xa1, 0x63, 0x78, 0xbd, 0x03, 0xa5,
|
||||
0xc9, 0x18, 0xf7, 0xeb, 0x6b, 0x25, 0x05, 0x57, 0xe0, 0x38, 0xb8, 0xc9, 0x59, 0x0e, 0x2e, 0x1a,
|
||||
0xa2, 0x51, 0x3b, 0x36, 0x33, 0xf9, 0x84, 0x70, 0x6f, 0x0e, 0x45, 0x06, 0x6a, 0x06, 0x5b, 0xd0,
|
||||
0x70, 0xba, 0xe1, 0x3c, 0xc5, 0x77, 0xf2, 0x8c, 0x2f, 0x4b, 0x97, 0x06, 0xdf, 0x99, 0x78, 0xa1,
|
||||
0x75, 0x18, 0x56, 0x11, 0xc2, 0x45, 0x15, 0x21, 0x6a, 0x7e, 0xf8, 0x16, 0xa0, 0xf8, 0x76, 0x9e,
|
||||
0xf1, 0x72, 0x67, 0xc8, 0x6c, 0x6f, 0xc9, 0xd7, 0x37, 0x26, 0xb3, 0xbd, 0x21, 0x4f, 0x4b, 0xb2,
|
||||
0x5e, 0x6d, 0xa0, 0x50, 0x6e, 0x63, 0xd8, 0x18, 0x75, 0x26, 0x6e, 0xf8, 0xb7, 0xd5, 0xf0, 0x39,
|
||||
0x4b, 0x60, 0xfb, 0xc2, 0x02, 0xa2, 0xe6, 0xe1, 0x6b, 0x70, 0x15, 0xff, 0xc1, 0x93, 0x7b, 0xb8,
|
||||
0x5f, 0x0f, 0x63, 0x93, 0x4f, 0x7e, 0x21, 0x7c, 0xeb, 0xd9, 0x3a, 0xcf, 0xb8, 0x53, 0xe0, 0xee,
|
||||
0x79, 0x37, 0x4e, 0x70, 0x7e, 0xfb, 0x42, 0x99, 0xde, 0xf0, 0xff, 0x00, 0x7b, 0x9c, 0x04, 0xef,
|
||||
0xbf, 0xfc, 0xfc, 0x78, 0xfd, 0x80, 0xdc, 0xa7, 0xac, 0x94, 0xa0, 0x6f, 0x1e, 0x53, 0xad, 0xd6,
|
||||
0x09, 0x55, 0x95, 0xc6, 0x3b, 0xdc, 0xb5, 0x7e, 0xac, 0xb7, 0xba, 0xe6, 0x85, 0xf2, 0xeb, 0x9a,
|
||||
0x97, 0x02, 0x91, 0x47, 0x46, 0xf3, 0x21, 0x19, 0xfc, 0xa3, 0xb9, 0x36, 0xb0, 0xa5, 0x32, 0x9c,
|
||||
0x29, 0x1a, 0x47, 0xee, 0xe1, 0x87, 0x7f, 0x75, 0x38, 0xfa, 0xe8, 0xf3, 0xd1, 0x47, 0xdf, 0x8f,
|
||||
0x3e, 0x7a, 0xd9, 0x2a, 0x6f, 0xcb, 0x24, 0x69, 0x99, 0xa7, 0x78, 0xf2, 0x3b, 0x00, 0x00, 0xff,
|
||||
0xff, 0xb9, 0xfc, 0xf3, 0x68, 0xbe, 0x02, 0x00, 0x00,
|
||||
}
|
new file: prompb/rpc.pb.gw.go (151 lines)
@@ -0,0 +1,151 @@
|
|||
// Code generated by protoc-gen-grpc-gateway
|
||||
// source: prompb/rpc.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package prompb is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package prompb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
|
||||
func request_Admin_TSDBSnapshot_0(ctx context.Context, marshaler runtime.Marshaler, client AdminClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq TSDBSnapshotRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := client.TSDBSnapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Admin_DeleteSeries_0(ctx context.Context, marshaler runtime.Marshaler, client AdminClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq SeriesDeleteRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
|
||||
return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.DeleteSeries(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterAdminHandlerFromEndpoint is same as RegisterAdminHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterAdminHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterAdminHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterAdminHandler registers the http handlers for service Admin to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterAdminHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
client := NewAdminClient(conn)
|
||||
|
||||
mux.Handle("POST", pattern_Admin_TSDBSnapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
if cn, ok := w.(http.CloseNotifier); ok {
|
||||
go func(done <-chan struct{}, closed <-chan bool) {
|
||||
select {
|
||||
case <-done:
|
||||
case <-closed:
|
||||
cancel()
|
||||
}
|
||||
}(ctx.Done(), cn.CloseNotify())
|
||||
}
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, req)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
|
||||
}
|
||||
resp, md, err := request_Admin_TSDBSnapshot_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Admin_TSDBSnapshot_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Admin_DeleteSeries_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
if cn, ok := w.(http.CloseNotifier); ok {
|
||||
go func(done <-chan struct{}, closed <-chan bool) {
|
||||
select {
|
||||
case <-done:
|
||||
case <-closed:
|
||||
cancel()
|
||||
}
|
||||
}(ctx.Done(), cn.CloseNotify())
|
||||
}
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, req)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
|
||||
}
|
||||
resp, md, err := request_Admin_DeleteSeries_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Admin_DeleteSeries_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Admin_TSDBSnapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"admin", "v1", "tsdb", "snapshot"}, ""))
|
||||
|
||||
pattern_Admin_DeleteSeries_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"admin", "v1", "tsdb", "delete_series"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Admin_TSDBSnapshot_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Admin_DeleteSeries_0 = runtime.ForwardResponseMessage
|
||||
)
|
new file: prompb/rpc.proto (63 lines)
@@ -0,0 +1,63 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

import "types.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "gogoproto/gogo.proto";

option go_package = "prompb";

option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;


service Admin {
  // Snapshot creates a snapshot of all current data into 'snapshots/<datetime>-<rand>' under
  // the TSDB's date directory.
  rpc TSDBSnapshot(TSDBSnapshotRequest) returns (TSDBSnapshotResponse) {
    option (google.api.http) = {
      post: "/admin/v1/tsdb/snapshot"
    };
  }

  // DeleteSeries deletes data for a selection of series in a time range.
  rpc DeleteSeries(SeriesDeleteRequest) returns (SeriesDeleteResponse) {
    option (google.api.http) = {
      post: "/admin/v1/tsdb/delete_series"
      body: "*"
    };
  }
}


message TSDBSnapshotRequest {
}

message TSDBSnapshotResponse {
  string name = 1;
}

message SeriesDeleteRequest {
  google.protobuf.Timestamp min_time = 1 [(gogoproto.stdtime) = true];
  google.protobuf.Timestamp max_time = 2 [(gogoproto.stdtime) = true];
  repeated LabelMatcher matchers = 3 [(gogoproto.nullable) = false];
}

message SeriesDeleteResponse {
}
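A hedged sketch of driving this service directly over gRPC with the generated prompb bindings. The target address and the example matcher are assumptions; the client type, request messages, and field names are the generated ones added in this commit.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "github.com/prometheus/prometheus/prompb"
)

func main() {
	// Assumed gRPC address of a Prometheus server built with this commit.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pb.NewAdminClient(conn)

	// Snapshot all current data; the response carries the snapshot name.
	snap, err := client.TSDBSnapshot(context.Background(), &pb.TSDBSnapshotRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println("snapshot:", snap.Name)

	// Delete the last 24h of data for series matching job="node" (example values).
	min, max := time.Now().Add(-24*time.Hour), time.Now()
	_, err = client.DeleteSeries(context.Background(), &pb.SeriesDeleteRequest{
		MinTime:  &min,
		MaxTime:  &max,
		Matchers: []pb.LabelMatcher{{Type: pb.LabelMatcher_EQ, Name: "job", Value: "node"}},
	})
	if err != nil {
		panic(err)
	}
}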
new file: prompb/types.pb.go (1096 lines)
File diff suppressed because it is too large
new file: prompb/types.proto (51 lines)
@@ -0,0 +1,51 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "gogoproto/gogo.proto";

message Sample {
  double value = 1;
  int64 timestamp = 2;
}

message TimeSeries {
  repeated Label labels = 1;
  repeated Sample samples = 2;
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1 [(gogoproto.nullable) = false];
}

// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}
new executable file: scripts/genproto.sh (50 lines)
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# Generate all protobuf bindings.
# Run from repository root.
set -e
set -u

if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
	echo "must be run from repository root"
	exit 255
fi

if ! [[ $(protoc --version) =~ "3.2.0" ]]; then
	echo "could not find protoc 3.2.0, is it installed + in PATH?"
	exit 255
fi

PROM_ROOT="${GOPATH}/src/github.com/prometheus/prometheus"
PROM_PATH="${PROM_ROOT}/prompb"
GOGOPROTO_ROOT="${GOPATH}/src/github.com/gogo/protobuf"
GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf"
GRPC_GATEWAY_ROOT="${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway"

DIRS="prompb"

for dir in ${DIRS}; do
	pushd ${dir}
	protoc --gogofast_out=plugins=grpc:. -I=. \
		-I="${GOGOPROTO_PATH}" \
		-I="${PROM_PATH}" \
		-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
		*.proto

	sed -i.bak -E 's/import _ \"gogoproto\"//g' *.pb.go
	sed -i.bak -E 's/import _ \"google\/protobuf\"//g' *.pb.go
	rm -f *.bak
	goimports -w *.pb.go
	popd
done

protoc -I. \
	-I="${GOGOPROTO_PATH}" \
	-I="${PROM_PATH}" \
	-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
	--grpc-gateway_out=logtostderr=true:. \
	--swagger_out=logtostderr=true:./documentation/dev/api/ \
	prompb/rpc.proto

mv documentation/dev/api/prompb/rpc.swagger.json documentation/dev/api/swagger.json
rm -rf documentation/dev/api/prompb
@@ -26,6 +26,10 @@ import (
 	tsdbLabels "github.com/prometheus/tsdb/labels"
 )

+func Adapter(db *tsdb.DB) storage.Storage {
+	return &adapter{db: db}
+}
+
 // adapter implements a storage.Storage around TSDB.
 type adapter struct {
 	db *tsdb.DB
@@ -50,8 +54,8 @@ type Options struct {
 	NoLockfile bool
 }

-// Open returns a new storage backed by a tsdb database.
-func Open(path string, r prometheus.Registerer, opts *Options) (storage.Storage, error) {
+// Open returns a new storage backed by a TSDB database that is configured for Prometheus.
+func Open(path string, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
 	db, err := tsdb.Open(path, nil, r, &tsdb.Options{
 		WALFlushInterval: 10 * time.Second,
 		MinBlockDuration: uint64(time.Duration(opts.MinBlockDuration).Seconds() * 1000),
@@ -62,7 +66,7 @@ func Open(path string, r prometheus.Registerer, opts *Options) (storage.Storage,
 	if err != nil {
 		return nil, err
 	}
-	return adapter{db: db}, nil
+	return db, nil
 }

 func (a adapter) Querier(mint, maxt int64) (storage.Querier, error) {

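A minimal sketch of the new split between the raw *tsdb.DB and the storage.Storage interface. It assumes this package lives at github.com/prometheus/prometheus/storage/tsdb (inferred from the ptsdb alias used later in this commit), and the empty Options value is a placeholder a real caller would have to fill in.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/storage"
	ptsdb "github.com/prometheus/prometheus/storage/tsdb"
)

func main() {
	// Open now returns the raw *tsdb.DB; the zero Options here are only a
	// placeholder, real block/retention settings would be required.
	db, err := ptsdb.Open("data/", prometheus.DefaultRegisterer, &ptsdb.Options{})
	if err != nil {
		log.Fatal(err)
	}
	// Components that still expect the storage.Storage interface are handed
	// the adapter instead of the DB itself.
	var localStorage storage.Storage = ptsdb.Adapter(db)
	_ = localStorage
}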
@@ -43,7 +43,7 @@ func NewStorage(t T) storage.Storage {
 	if err != nil {
 		t.Fatalf("Opening test storage failed: %s", err)
 	}
-	return testStorage{Storage: db, dir: dir}
+	return testStorage{Storage: tsdb.Adapter(db), dir: dir}
 }

 type testStorage struct {

@@ -97,7 +97,7 @@ type apiFunc func(r *http.Request) (interface{}, *apiError)
 // API can register a set of endpoints in a router and handle
 // them using the provided storage and query engine.
 type API struct {
-	Storage storage.Storage
+	Queryable promql.Queryable
 	QueryEngine *promql.Engine

 	targetRetriever targetRetriever
@@ -107,10 +107,10 @@ type API struct {
 }

 // NewAPI returns an initialized API type.
-func NewAPI(qe *promql.Engine, st storage.Storage, tr targetRetriever, ar alertmanagerRetriever) *API {
+func NewAPI(qe *promql.Engine, q promql.Queryable, tr targetRetriever, ar alertmanagerRetriever) *API {
 	return &API{
 		QueryEngine: qe,
-		Storage: st,
+		Queryable: q,
 		targetRetriever: tr,
 		alertmanagerRetriever: ar,
 		now: time.Now,
@@ -276,7 +276,7 @@ func (api *API) labelValues(r *http.Request) (interface{}, *apiError) {
 	if !model.LabelNameRE.MatchString(name) {
 		return nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}
 	}
-	q, err := api.Storage.Querier(math.MinInt64, math.MaxInt64)
+	q, err := api.Queryable.Querier(math.MinInt64, math.MaxInt64)
 	if err != nil {
 		return nil, &apiError{errorExec, err}
 	}
@@ -333,7 +333,7 @@ func (api *API) series(r *http.Request) (interface{}, *apiError) {
 		matcherSets = append(matcherSets, matchers)
 	}

-	q, err := api.Storage.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
+	q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
 	if err != nil {
 		return nil, &apiError{errorExec, err}
 	}
@@ -358,33 +358,7 @@ func (api *API) series(r *http.Request) (interface{}, *apiError) {
 }

 func (api *API) dropSeries(r *http.Request) (interface{}, *apiError) {
-	r.ParseForm()
-	if len(r.Form["match[]"]) == 0 {
-		return nil, &apiError{errorBadData, fmt.Errorf("no match[] parameter provided")}
-	}
-
-	// TODO(fabxc): temporarily disabled
-	return nil, &apiError{errorExec, fmt.Errorf("temporarily disabled")}
-
-	// numDeleted := 0
-	// for _, s := range r.Form["match[]"] {
-	// 	matchers, err := promql.ParseMetricSelector(s)
-	// 	if err != nil {
-	// 		return nil, &apiError{errorBadData, err}
-	// 	}
-	// 	n, err := api.Storage.DropMetricsForLabelMatchers(context.TODO(), matchers...)
-	// 	if err != nil {
-	// 		return nil, &apiError{errorExec, err}
-	// 	}
-	// 	numDeleted += n
-	// }
-
-	// res := struct {
-	// 	NumDeleted int `json:"numDeleted"`
-	// }{
-	// 	NumDeleted: numDeleted,
-	// }
-	// return res, nil
+	return nil, &apiError{errorInternal, fmt.Errorf("not implemented")}
 }

 // Target has the information for one target.

@@ -88,7 +88,7 @@ func TestEndpoints(t *testing.T) {
 	})

 	api := &API{
-		Storage: suite.Storage(),
+		Queryable: suite.Storage(),
 		QueryEngine: suite.QueryEngine(),
 		targetRetriever: tr,
 		alertmanagerRetriever: ar,
@@ -400,51 +400,8 @@ func TestEndpoints(t *testing.T) {
 		},
 		{
 			endpoint: api.dropSeries,
-			errType: errorBadData,
+			errType: errorInternal,
 		},
-		// The following tests delete time series from the test storage. They
-		// must remain at the end and are fixed in their order.
-		// {
-		// 	endpoint: api.dropSeries,
-		// 	query: url.Values{
-		// 		"match[]": []string{`test_metric1{foo=~".+o"}`},
-		// 	},
-		// 	response: struct {
-		// 		NumDeleted int `json:"numDeleted"`
-		// 	}{1},
-		// },
-		// {
-		// 	endpoint: api.series,
-		// 	query: url.Values{
-		// 		"match[]": []string{`test_metric1`},
-		// 	},
-		// 	response: []model.Metric{
-		// 		{
-		// 			"__name__": "test_metric1",
-		// 			"foo": "bar",
-		// 		},
-		// 	},
-		// }, {
-		// 	endpoint: api.dropSeries,
-		// 	query: url.Values{
-		// 		"match[]": []string{`{__name__=~".+"}`},
-		// 	},
-		// 	response: struct {
-		// 		NumDeleted int `json:"numDeleted"`
-		// 	}{2},
-		// }, {
-		// 	endpoint: api.targets,
-		// 	response: &TargetDiscovery{
-		// 		ActiveTargets: []*Target{
-		// 			{
-		// 				DiscoveredLabels: model.LabelSet{},
-		// 				Labels: model.LabelSet{},
-		// 				ScrapeURL: "http://example.com:8080/metrics",
-		// 				Health: "unknown",
-		// 			},
-		// 		},
-		// 	},
-		// },
 		{
 			endpoint: api.alertmanagers,
 			response: &AlertmanagerDiscovery{

new file: web/api/v2/api.go (195 lines)
@@ -0,0 +1,195 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package apiv2

import (
	"fmt"
	"math"
	"math/rand"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"time"

	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/pkg/errors"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/timestamp"
	pb "github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/retrieval"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/tsdb"
	tsdbLabels "github.com/prometheus/tsdb/labels"
)

// API encapsulates all API services.
type API struct {
	now           func() time.Time
	db            *tsdb.DB
	q             func(mint, maxt int64) storage.Querier
	targets       func() []*retrieval.Target
	alertmanagers func() []*url.URL
}

// New returns a new API object.
func New(
	now func() time.Time,
	db *tsdb.DB,
	qe *promql.Engine,
	q func(mint, maxt int64) storage.Querier,
	targets func() []*retrieval.Target,
	alertmanagers func() []*url.URL,
) *API {
	return &API{
		now:           now,
		db:            db,
		q:             q,
		targets:       targets,
		alertmanagers: alertmanagers,
	}
}

// RegisterGRPC registers all API services with the given server.
func (api *API) RegisterGRPC(srv *grpc.Server) {
	pb.RegisterAdminServer(srv, NewAdmin(api.db))
}

// HTTPHandler returns an HTTP handler for a REST API gateway to the given grpc address.
func (api *API) HTTPHandler(grpcAddr string) (http.Handler, error) {
	ctx := context.Background()

	enc := new(protoutil.JSONPb)
	mux := runtime.NewServeMux(runtime.WithMarshalerOption(enc.ContentType(), enc))

	opts := []grpc.DialOption{grpc.WithInsecure()}

	err := pb.RegisterAdminHandlerFromEndpoint(ctx, mux, grpcAddr, opts)
	if err != nil {
		return nil, err
	}
	return mux, nil
}

// extractTimeRange returns minimum and maximum timestamp in milliseconds as
// provided by the time range. It defaults either boundary to the minimum and maximum
// possible value.
func extractTimeRange(min, max *time.Time) (mint, maxt time.Time, err error) {
	if min == nil {
		mint = minTime
	} else {
		mint = *min
	}
	if max == nil {
		maxt = maxTime
	} else {
		maxt = *max
	}
	if mint.After(maxt) {
		return mint, maxt, errors.Errorf("min time must be before max time")
	}
	return mint, maxt, nil
}

var (
	minTime = time.Unix(math.MinInt64/1000+62135596801, 0)
	maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999)
)

func labelsToProto(lset labels.Labels) pb.Labels {
	r := pb.Labels{
		Labels: make([]pb.Label, 0, len(lset)),
	}
	for _, l := range lset {
		r.Labels = append(r.Labels, pb.Label{Name: l.Name, Value: l.Value})
	}
	return r
}

// Admin provides an administration interface to Prometheus.
type Admin struct {
	db      *tsdb.DB
	snapdir string
}

// NewAdmin returns an Admin server.
func NewAdmin(db *tsdb.DB) *Admin {
	return &Admin{
		db:      db,
		snapdir: filepath.Join(db.Dir(), "snapshots"),
	}
}

// TSDBSnapshot implements pb.AdminServer.
func (s *Admin) TSDBSnapshot(_ context.Context, _ *pb.TSDBSnapshotRequest) (*pb.TSDBSnapshotResponse, error) {
	var (
		name = fmt.Sprintf("%s-%x", time.Now().UTC().Format(time.RFC3339), rand.Int())
		dir  = filepath.Join(s.snapdir, name)
	)
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, status.Errorf(codes.Internal, "created snapshot directory: %s", err)
	}
	if err := s.db.Snapshot(dir); err != nil {
		return nil, status.Errorf(codes.Internal, "create snapshot: %s", err)
	}
	return &pb.TSDBSnapshotResponse{Name: name}, nil
}

// DeleteSeries implements pb.AdminServer.
func (s *Admin) DeleteSeries(_ context.Context, r *pb.SeriesDeleteRequest) (*pb.SeriesDeleteResponse, error) {
	mint, maxt, err := extractTimeRange(r.MinTime, r.MaxTime)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	var matchers tsdbLabels.Selector

	for _, m := range r.Matchers {
		var lm tsdbLabels.Matcher
		var err error

		switch m.Type {
		case pb.LabelMatcher_EQ:
			lm = tsdbLabels.NewEqualMatcher(m.Name, m.Value)
		case pb.LabelMatcher_NEQ:
			lm = tsdbLabels.Not(tsdbLabels.NewEqualMatcher(m.Name, m.Value))
		case pb.LabelMatcher_RE:
			lm, err = tsdbLabels.NewRegexpMatcher(m.Name, m.Value)
			if err != nil {
				return nil, status.Errorf(codes.InvalidArgument, "bad regexp matcher: %s", err)
			}
		case pb.LabelMatcher_NRE:
			lm, err = tsdbLabels.NewRegexpMatcher(m.Name, m.Value)
			if err != nil {
				return nil, status.Errorf(codes.InvalidArgument, "bad regexp matcher: %s", err)
			}
			lm = tsdbLabels.Not(lm)
		default:
			return nil, status.Error(codes.InvalidArgument, "unknown matcher type")
		}

		matchers = append(matchers, lm)
	}
	if err := s.db.Delete(timestamp.FromTime(mint), timestamp.FromTime(maxt), matchers...); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return &pb.SeriesDeleteResponse{}, nil
}
117
web/web.go
117
web/web.go
|
@@ -30,15 +30,21 @@ import (
	"sync"
	"time"

	"google.golang.org/grpc"

	pprof_runtime "runtime/pprof"
	template_text "text/template"

	"github.com/cockroachdb/cmux"
	"github.com/opentracing-contrib/go-stdlib/nethttp"
	"github.com/opentracing/opentracing-go"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/route"
	"github.com/prometheus/prometheus/storage"
	ptsdb "github.com/prometheus/prometheus/storage/tsdb"
	"github.com/prometheus/tsdb"
	"golang.org/x/net/context"
	"golang.org/x/net/netutil"

@@ -48,10 +54,10 @@ import (
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/retrieval"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/template"
	"github.com/prometheus/prometheus/util/httputil"
	api_v1 "github.com/prometheus/prometheus/web/api/v1"
	apiv2 "github.com/prometheus/prometheus/web/api/v2"
	"github.com/prometheus/prometheus/web/ui"
)

@@ -63,13 +69,13 @@ type Handler struct {
	ruleManager *rules.Manager
	queryEngine *promql.Engine
	context context.Context
	tsdb *tsdb.DB
	storage storage.Storage
	notifier *notifier.Notifier

	apiV1 *api_v1.API

	router *route.Router
	listenErrCh chan error
	quitCh chan struct{}
	reloadCh chan chan error
	options *Options

@@ -108,7 +114,7 @@ type PrometheusVersion struct {
// Options for the web Handler.
type Options struct {
	Context context.Context
	Storage storage.Storage
	Storage *tsdb.DB
	QueryEngine *promql.Engine
	TargetManager *retrieval.TargetManager
	RuleManager *rules.Manager

@@ -121,6 +127,7 @@ type Options struct {
	MaxConnections int
	ExternalURL *url.URL
	RoutePrefix string
	MetricsPath string
	UseLocalAssets bool
	UserAssetsPath string
	ConsoleTemplatesPath string

@@ -139,7 +146,6 @@ func New(o *Options) *Handler {

	h := &Handler{
		router: router,
		listenErrCh: make(chan error),
		quitCh: make(chan struct{}),
		reloadCh: make(chan chan error),
		options: o,

@@ -152,13 +158,14 @@ func New(o *Options) *Handler {
		targetManager: o.TargetManager,
		ruleManager: o.RuleManager,
		queryEngine: o.QueryEngine,
		storage: o.Storage,
		tsdb: o.Storage,
		storage: ptsdb.Adapter(o.Storage),
		notifier: o.Notifier,

		apiV1: api_v1.NewAPI(o.QueryEngine, o.Storage, o.TargetManager, o.Notifier),
		now: model.Now,
	}

	h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, h.targetManager, h.notifier)

	if o.RoutePrefix != "/" {
		// If the prefix is missing for the root path, prepend it.
		router.Get("/", func(w http.ResponseWriter, r *http.Request) {

@@ -171,7 +178,7 @@ func New(o *Options) *Handler {
	instrf := prometheus.InstrumentHandlerFunc

	router.Get("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
		router.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
	})

	router.Get("/alerts", instrf("alerts", h.alerts))

@@ -191,8 +198,6 @@ func New(o *Options) *Handler {
		Handler: http.HandlerFunc(h.federation),
	}))

	h.apiV1.Register(router.WithPrefix("/api/v1"))

	router.Get("/consoles/*filepath", instrf("consoles", h.consoles))

	router.Get("/static/*filepath", instrf("static", serveStaticAsset))

@@ -217,6 +222,20 @@ func New(o *Options) *Handler {
	return h
}

var corsHeaders = map[string]string{
	"Access-Control-Allow-Headers": "Accept, Authorization, Content-Type, Origin",
	"Access-Control-Allow-Methods": "GET, OPTIONS",
	"Access-Control-Allow-Origin": "*",
	"Access-Control-Expose-Headers": "Date",
}

// Enables cross-site script calls.
func setCORS(w http.ResponseWriter) {
	for h, v := range corsHeaders {
		w.Header().Set(h, v)
	}
}
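
setCORS is applied per request; a minimal usage sketch, wrapping an arbitrary handler the same way the /api/ mount in Run below wraps the gRPC-gateway handler (withCORS is illustrative and not part of this diff):

func withCORS(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		setCORS(w)
		next.ServeHTTP(w, r)
	})
}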

func serveStaticAsset(w http.ResponseWriter, req *http.Request) {
	fp := route.Param(req.Context(), "filepath")
	fp = filepath.Join("web/ui/static", fp)

@@ -239,11 +258,6 @@ func serveStaticAsset(w http.ResponseWriter, req *http.Request) {
	http.ServeContent(w, req, info.Name(), info.ModTime(), bytes.NewReader(file))
}

// ListenError returns the receive-only channel that signals errors while starting the web server.
func (h *Handler) ListenError() <-chan error {
	return h.listenErrCh
}

// Quit returns the receive-only quit channel.
func (h *Handler) Quit() <-chan struct{} {
	return h.quitCh

@@ -255,24 +269,73 @@ func (h *Handler) Reload() <-chan chan error {
}

// Run serves the HTTP endpoints.
func (h *Handler) Run() {
func (h *Handler) Run(ctx context.Context) error {
	log.Infof("Listening on %s", h.options.ListenAddress)

	l, err := net.Listen("tcp", h.options.ListenAddress)
	if err != nil {
		return err
	}
	l = netutil.LimitListener(l, h.options.MaxConnections)

	var (
		m = cmux.New(l)
		grpcl = m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
		httpl = m.Match(cmux.HTTP1Fast())
		grpcSrv = grpc.NewServer()
	)
	av2 := apiv2.New(
		time.Now,
		h.options.Storage,
		h.options.QueryEngine,
		func(mint, maxt int64) storage.Querier {
			q, err := ptsdb.Adapter(h.options.Storage).Querier(mint, maxt)
			if err != nil {
				panic(err)
			}
			return q
		},
		func() []*retrieval.Target {
			return h.options.TargetManager.Targets()
		},
		func() []*url.URL {
			return h.options.Notifier.Alertmanagers()
		},
	)
	av2.RegisterGRPC(grpcSrv)

	hh, err := av2.HTTPHandler(grpcl.Addr().String())
	if err != nil {
		return err
	}

	operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
		return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
	})
	server := &http.Server{
		Addr: h.options.ListenAddress,
		Handler: nethttp.Middleware(opentracing.GlobalTracer(), h.router, operationName),
	mux := http.NewServeMux()
	mux.Handle("/", h.router)

	av1 := route.New()
	h.apiV1.Register(av1)
	mux.Handle("/api/v1/", http.StripPrefix("/api/v1", av1))

	mux.Handle("/api/", http.StripPrefix("/api",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			setCORS(w)
			hh.ServeHTTP(w, r)
		}),
	))

	httpSrv := &http.Server{
		Handler: nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName),
		ErrorLog: log.NewErrorLogger(),
		ReadTimeout: h.options.ReadTimeout,
	}
	listener, err := net.Listen("tcp", h.options.ListenAddress)
	if err != nil {
		h.listenErrCh <- err
	} else {
		limitedListener := netutil.LimitListener(listener, h.options.MaxConnections)
		h.listenErrCh <- server.Serve(limitedListener)
	}

	go httpSrv.Serve(httpl)
	go grpcSrv.Serve(grpcl)

	return m.Serve()
}
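
The connection multiplexing above is the crux of serving gRPC and the existing HTTP endpoints on one port. A self-contained sketch of the same cmux pattern, stripped of all Prometheus types (address and handler are placeholders):

package main

import (
	"io"
	"net"
	"net/http"

	"github.com/cockroachdb/cmux"
	"google.golang.org/grpc"
)

func main() {
	l, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	m := cmux.New(l)

	// gRPC arrives as HTTP/2 with content-type application/grpc; match it first.
	grpcl := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
	// Anything that parses as HTTP/1.x goes to the regular web handler.
	httpl := m.Match(cmux.HTTP1Fast())

	grpcSrv := grpc.NewServer() // register pb services here
	httpSrv := &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "plain HTTP\n")
		}),
	}

	go grpcSrv.Serve(grpcl)
	go httpSrv.Serve(httpl)

	// Serve accepts on the shared listener and dispatches to the matched listeners.
	if err := m.Serve(); err != nil {
		panic(err)
	}
}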

func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {