Add extension point for returning different content types from API endpoints (#412)
* Add initial sketch of Codec interface.
* Introduce JSON codec.
* Expose Response type so that consuming applications (e.g. Mimir) can implement their own Codecs.
* Add sketch of what supporting different codecs could look like.
* Rename fallbackCodec to defaultCodec.
* Remove defaultCodec as a field on API.
* Rename AddCodec() and clarify expected behaviour.
* Modify TestRespond to test JsonCodec directly.
* Refactor existing respond() test in preparation for content negotiation test cases.
* Add tests for content negotiation.
* Add missing documentation comments.
* Add another test case.
* Rename JsonCodec to JSONCodec.
* Fix linting issue.
* Fall back to JSON codec if no acceptable codec can be found for the Accept header.
* Move custom jsoniter code into json_codec.go.

Signed-off-by: Charles Korn <charles.korn@grafana.com>
This commit is contained in: parent 82cbf1dd0b, commit d9063441c1
web/api/v1/api.go

@@ -27,7 +27,6 @@ import (
 	"strconv"
 	"strings"
 	"time"
-	"unsafe"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -40,8 +39,6 @@ import (
 	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/config"
-	"github.com/prometheus/prometheus/model/exemplar"
-	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/model/timestamp"
@@ -54,7 +51,6 @@ import (
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/index"
 	"github.com/prometheus/prometheus/util/httputil"
-	"github.com/prometheus/prometheus/util/jsonutil"
 	"github.com/prometheus/prometheus/util/stats"
 )
@@ -84,6 +80,8 @@ const (
 
 var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"}
 
+var defaultCodec = JSONCodec{}
+
 type apiError struct {
 	typ errorType
 	err error
@@ -149,7 +147,8 @@ type RuntimeInfo struct {
 	StorageRetention    string `json:"storageRetention"`
 }
 
-type response struct {
+// Response contains a response to a HTTP API request.
+type Response struct {
 	Status    status      `json:"status"`
 	Data      interface{} `json:"data,omitempty"`
 	ErrorType errorType   `json:"errorType,omitempty"`
@@ -212,13 +211,8 @@ type API struct {
 
 	remoteWriteHandler http.Handler
 	remoteReadHandler  http.Handler
-}
 
-func init() {
-	jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty)
-	jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty)
-	jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
-	jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty)
+	codecs map[string]Codec
 }
 
 // NewAPI returns an initialized API type.
@@ -277,8 +271,12 @@ func NewAPI(
 		statsRenderer: defaultStatsRenderer,
 
 		remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
+
+		codecs: map[string]Codec{},
 	}
 
+	a.InstallCodec(defaultCodec)
+
 	if statsRenderer != nil {
 		a.statsRenderer = statsRenderer
 	}
@@ -290,6 +288,16 @@ func NewAPI(
 	return a
 }
 
+// InstallCodec adds codec to this API's available codecs.
+// If codec handles a content type handled by a codec already installed in this API, codec replaces the previous codec.
+func (api *API) InstallCodec(codec Codec) {
+	if api.codecs == nil {
+		api.codecs = map[string]Codec{}
+	}
+
+	api.codecs[codec.ContentType()] = codec
+}
+
 func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult {
 	if r.err != nil && errors.Cause(r.err.err) == tsdb.ErrNotReady {
 		r.err.typ = errorUnavailable
@@ -312,7 +320,7 @@ func (api *API) Register(r *route.Router) {
 		}
 
 		if result.data != nil {
-			api.respond(w, result.data, result.warnings)
+			api.respond(w, r, result.data, result.warnings)
 			return
 		}
 		w.WriteHeader(http.StatusNoContent)
@@ -1460,7 +1468,7 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		api.respondError(w, &apiError{errorInternal, err}, nil)
 	}
-	api.respond(w, walReplayStatus{
+	api.respond(w, r, walReplayStatus{
 		Min:     status.Min,
 		Max:     status.Max,
 		Current: status.Current,
@@ -1562,34 +1570,59 @@ func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
 	return apiFuncResult{nil, nil, nil, nil}
 }
 
-func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) {
+func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings storage.Warnings) {
 	statusMessage := statusSuccess
 	var warningStrings []string
 	for _, warning := range warnings {
 		warningStrings = append(warningStrings, warning.Error())
 	}
-	json := jsoniter.ConfigCompatibleWithStandardLibrary
-	b, err := json.Marshal(&response{
+
+	resp := &Response{
 		Status:   statusMessage,
 		Data:     data,
 		Warnings: warningStrings,
-	})
+	}
+
+	codec := api.negotiateCodec(req, resp)
+	b, err := codec.Encode(resp)
 	if err != nil {
-		level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
+		level.Error(api.logger).Log("msg", "error marshaling response", "err", err)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
-	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Content-Type", codec.ContentType())
 	w.WriteHeader(http.StatusOK)
 	if n, err := w.Write(b); err != nil {
 		level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
 	}
 }
 
+// FIXME: HTTP content negotiation is hard (see https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation).
+// Ideally, we shouldn't be implementing this ourselves - https://github.com/golang/go/issues/19307 is an open proposal to add
+// this to the Go stdlib and has links to a number of other implementations.
+//
+// This is an initial MVP, and doesn't support features like wildcards or weighting.
+func (api *API) negotiateCodec(req *http.Request, resp *Response) Codec {
+	acceptHeader := req.Header.Get("Accept")
+	if acceptHeader == "" {
+		return defaultCodec
+	}
+
+	for _, contentType := range strings.Split(acceptHeader, ",") {
+		codec, ok := api.codecs[strings.TrimSpace(contentType)]
+		if ok && codec.CanEncode(resp) {
+			return codec
+		}
+	}
+
+	level.Warn(api.logger).Log("msg", "could not find suitable codec for response, falling back to default codec", "accept_header", acceptHeader)
+	return defaultCodec
+}
+
 func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
-	b, err := json.Marshal(&response{
+	b, err := json.Marshal(&Response{
 		Status:    statusError,
 		ErrorType: apiErr.typ,
 		Error:     apiErr.err.Error(),
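The FIXME above is explicit that this negotiation is a minimal first pass: it matches the Accept header's content types literally and ignores wildcards and q-weighting. As a rough, hedged sketch of what weighting support could involve (this is not part of the commit; parseAccept and acceptClause are made-up names, and a real implementation would more likely adopt one of the libraries linked from the Go proposal), an embedding application could pre-sort the header before consulting the installed codecs:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// acceptClause is one content type from an Accept header together with its quality value.
type acceptClause struct {
	contentType string
	quality     float64
}

// parseAccept splits an Accept header into content types ordered by their
// q parameter (highest first), defaulting q to 1.0 when absent.
func parseAccept(header string) []acceptClause {
	var clauses []acceptClause
	for _, part := range strings.Split(header, ",") {
		fields := strings.Split(strings.TrimSpace(part), ";")
		clause := acceptClause{contentType: strings.TrimSpace(fields[0]), quality: 1.0}
		for _, param := range fields[1:] {
			param = strings.TrimSpace(param)
			if strings.HasPrefix(param, "q=") {
				if q, err := strconv.ParseFloat(param[2:], 64); err == nil {
					clause.quality = q
				}
			}
		}
		clauses = append(clauses, clause)
	}
	// Stable sort keeps the header's original order for equal q values.
	sort.SliceStable(clauses, func(i, j int) bool { return clauses[i].quality > clauses[j].quality })
	return clauses
}

func main() {
	for _, c := range parseAccept("application/json;q=0.5, application/x-protobuf") {
		fmt.Printf("%s (q=%g)\n", c.contentType, c.quality)
	}
}

Sorting by q first would let a header such as "application/json;q=0.5, application/x-protobuf" prefer a protobuf codec when one is installed, while still falling back to JSON otherwise.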
@@ -1696,247 +1729,3 @@ OUTER:
 	}
 	return matcherSets, nil
 }

(The 244 removed lines that follow delete the custom jsoniter marshaling code from this file: marshalSeriesJSON, marshalSampleJSON, marshalPointJSON, marshalHistogram, marshalExemplarJSON, their doc comments and their *IsEmpty/*Empty helpers. They reappear verbatim in the new web/api/v1/json_codec.go below.)
web/api/v1/api_test.go

@@ -18,7 +18,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -30,7 +29,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/util/stats"
@@ -2767,39 +2765,93 @@ func TestAdminEndpoints(t *testing.T) {
 }
 
 func TestRespondSuccess(t *testing.T) {
+	api := API{
+		logger: log.NewNopLogger(),
+	}
+
+	api.InstallCodec(&testCodec{contentType: "test/cannot-encode", canEncode: false})
+	api.InstallCodec(&testCodec{contentType: "test/can-encode", canEncode: true})
+	api.InstallCodec(&testCodec{contentType: "test/can-encode-2", canEncode: true})
+
 	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		api := API{}
-		api.respond(w, "test", nil)
+		api.respond(w, r, "test", nil)
 	}))
 	defer s.Close()
 
-	resp, err := http.Get(s.URL)
-	if err != nil {
-		t.Fatalf("Error on test request: %s", err)
-	}
-	body, err := io.ReadAll(resp.Body)
-	defer resp.Body.Close()
-	if err != nil {
-		t.Fatalf("Error reading response body: %s", err)
-	}
+	for _, tc := range []struct {
+		name                string
+		acceptHeader        string
+		expectedContentType string
+		expectedBody        string
+	}{
+		{
+			name:                "no Accept header",
+			expectedContentType: "application/json",
+			expectedBody:        `{"status":"success","data":"test"}`,
+		},
+		{
+			name:                "Accept header with single content type which is suitable",
+			acceptHeader:        "test/can-encode",
+			expectedContentType: "test/can-encode",
+			expectedBody:        `response from test/can-encode codec`,
+		},
+		{
+			name:                "Accept header with single content type which is not available",
+			acceptHeader:        "test/not-registered",
+			expectedContentType: "application/json",
+			expectedBody:        `{"status":"success","data":"test"}`,
+		},
+		{
+			name:                "Accept header with single content type which cannot encode the response payload",
+			acceptHeader:        "test/cannot-encode",
+			expectedContentType: "application/json",
+			expectedBody:        `{"status":"success","data":"test"}`,
+		},
+		{
+			name:                "Accept header with multiple content types, all of which are suitable",
+			acceptHeader:        "test/can-encode, test/can-encode-2",
+			expectedContentType: "test/can-encode",
+			expectedBody:        `response from test/can-encode codec`,
+		},
+		{
+			name:                "Accept header with multiple content types, only one of which is available",
+			acceptHeader:        "test/not-registered, test/can-encode",
+			expectedContentType: "test/can-encode",
+			expectedBody:        `response from test/can-encode codec`,
+		},
+		{
+			name:                "Accept header with multiple content types, only one of which can encode the response payload",
+			acceptHeader:        "test/cannot-encode, test/can-encode",
+			expectedContentType: "test/can-encode",
+			expectedBody:        `response from test/can-encode codec`,
+		},
+		{
+			name:                "Accept header with multiple content types, none of which are available",
+			acceptHeader:        "test/not-registered, test/also-not-registered",
+			expectedContentType: "application/json",
+			expectedBody:        `{"status":"success","data":"test"}`,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			req, err := http.NewRequest(http.MethodGet, s.URL, nil)
+			require.NoError(t, err)
 
-	if resp.StatusCode != 200 {
-		t.Fatalf("Return code %d expected in success response but got %d", 200, resp.StatusCode)
-	}
-	if h := resp.Header.Get("Content-Type"); h != "application/json" {
-		t.Fatalf("Expected Content-Type %q but got %q", "application/json", h)
-	}
+			if tc.acceptHeader != "" {
+				req.Header.Set("Accept", tc.acceptHeader)
+			}
 
-	var res response
-	if err = json.Unmarshal([]byte(body), &res); err != nil {
-		t.Fatalf("Error unmarshaling JSON body: %s", err)
-	}
+			resp, err := http.DefaultClient.Do(req)
+			require.NoError(t, err)
 
-	exp := &response{
-		Status: statusSuccess,
-		Data:   "test",
+			body, err := io.ReadAll(resp.Body)
+			defer resp.Body.Close()
+			require.NoError(t, err)
+
+			require.Equal(t, http.StatusOK, resp.StatusCode)
+			require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type"))
+			require.Equal(t, tc.expectedBody, string(body))
+		})
 	}
-	require.Equal(t, exp, &res)
 }
 
 func TestRespondError(t *testing.T) {
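The table-driven TestRespondSuccess above exercises negotiation end to end with Go's HTTP client and the test codecs registered via InstallCodec. The same behaviour is visible against a running server with an ordinary request; a small illustrative sketch, not part of the commit (the URL and query string are placeholders, and with only the built-in codec installed the response is always application/json):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder endpoint; any Prometheus instant-query URL works the same way.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:9090/api/v1/query?query=up", nil)
	if err != nil {
		panic(err)
	}
	// Ask for a specific content type. If no installed codec handles it,
	// the API logs a warning and falls back to the JSON codec.
	req.Header.Set("Accept", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// The Content-Type header reflects whichever codec was negotiated.
	fmt.Println(resp.Header.Get("Content-Type"))
	fmt.Println(string(body))
}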
@@ -2826,12 +2878,12 @@ func TestRespondError(t *testing.T) {
 		t.Fatalf("Expected Content-Type %q but got %q", "application/json", h)
 	}
 
-	var res response
+	var res Response
 	if err = json.Unmarshal([]byte(body), &res); err != nil {
 		t.Fatalf("Error unmarshaling JSON body: %s", err)
 	}
 
-	exp := &response{
+	exp := &Response{
 		Status:    statusError,
 		Data:      "test",
 		ErrorType: errorTimeout,
@@ -3049,165 +3101,6 @@ func TestOptionsMethod(t *testing.T) {
 	}
 }
 
(The 159 removed lines that follow delete TestRespond and its table of expected JSON encodings from this file; the same cases reappear, exercised against JSONCodec directly, in the new web/api/v1/json_codec_test.go below.)

 func TestTSDBStatus(t *testing.T) {
 	tsdb := &fakeDB{}
 	tsdbStatusAPI := func(api *API) apiFunc { return api.serveTSDBStatus }
@@ -3283,6 +3176,8 @@ var testResponseWriter = httptest.ResponseRecorder{}
 
 func BenchmarkRespond(b *testing.B) {
 	b.ReportAllocs()
+	request, err := http.NewRequest(http.MethodGet, "/does-not-matter", nil)
+	require.NoError(b, err)
 	points := []promql.Point{}
 	for i := 0; i < 10000; i++ {
 		points = append(points, promql.Point{V: float64(i * 1000000), T: int64(i)})
@@ -3299,7 +3194,7 @@ func BenchmarkRespond(b *testing.B) {
 	b.ResetTimer()
 	api := API{}
 	for n := 0; n < b.N; n++ {
-		api.respond(&testResponseWriter, response, nil)
+		api.respond(&testResponseWriter, request, response, nil)
 	}
 }
@@ -3410,3 +3305,20 @@ func TestGetGlobalURL(t *testing.T) {
 		})
 	}
 }
+
+type testCodec struct {
+	contentType string
+	canEncode   bool
+}
+
+func (t *testCodec) ContentType() string {
+	return t.contentType
+}
+
+func (t *testCodec) CanEncode(_ *Response) bool {
+	return t.canEncode
+}
+
+func (t *testCodec) Encode(_ *Response) ([]byte, error) {
+	return []byte(fmt.Sprintf("response from %v codec", t.contentType)), nil
+}
web/api/v1/codec.go (new file, 26 lines)

@@ -0,0 +1,26 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

// A Codec performs encoding of API responses.
type Codec interface {
	// ContentType returns the MIME type that this Codec emits.
	ContentType() string

	// CanEncode determines if this Codec can encode resp.
	CanEncode(resp *Response) bool

	// Encode encodes resp, ready for transmission to an API consumer.
	Encode(resp *Response) ([]byte, error)
}
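Because Response, Codec and API.InstallCodec are all exported, a consuming application (the commit message names Mimir as the motivating case) can register its own encoding. A minimal sketch under assumptions of my own: the vendor content type, the vendorJSONCodec type and installCustomCodec are illustrative and not part of this commit.

package example

import (
	"encoding/json"

	v1 "github.com/prometheus/prometheus/web/api/v1"
)

// vendorJSONCodec re-encodes API responses with encoding/json under a
// vendor-specific content type that clients can request via Accept.
// Note that encoding/json does not apply the jsoniter-specific formatting
// used by the built-in JSONCodec, so promql values are laid out differently.
type vendorJSONCodec struct{}

func (vendorJSONCodec) ContentType() string { return "application/vnd.example.api+json" }

// Returning false here would make negotiation skip this codec and fall
// through to the next acceptable one (ultimately the default JSON codec).
func (vendorJSONCodec) CanEncode(_ *v1.Response) bool { return true }

func (vendorJSONCodec) Encode(resp *v1.Response) ([]byte, error) {
	return json.Marshal(resp)
}

// installCustomCodec registers the codec; a codec installed for an already
// handled content type replaces the previously installed one.
func installCustomCodec(api *v1.API) {
	api.InstallCodec(vendorJSONCodec{})
}

Clients would then opt in with "Accept: application/vnd.example.api+json"; any other Accept value still falls through to the default JSON codec installed by NewAPI.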
web/api/v1/json_codec.go (new file, 292 lines)

@@ -0,0 +1,292 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import (
	"unsafe"

	jsoniter "github.com/json-iterator/go"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/util/jsonutil"
)

func init() {
	jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
	jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty)
}

// JSONCodec is a Codec that encodes API responses as JSON.
type JSONCodec struct{}

func (j JSONCodec) ContentType() string {
	return "application/json"
}

func (j JSONCodec) CanEncode(_ *Response) bool {
	return true
}

func (j JSONCodec) Encode(resp *Response) ([]byte, error) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	return json.Marshal(resp)
}

// marshalSeriesJSON writes something like the following:
//
//	{
//	   "metric" : {
//	      "__name__" : "up",
//	      "job" : "prometheus",
//	      "instance" : "localhost:9090"
//	   },
//	   "values": [
//	      [ 1435781451.781, "1" ],
//	      < more values>
//	   ],
//	   "histograms": [
//	      [ 1435781451.781, { < histogram, see below > } ],
//	      < more histograms >
//	   ],
//	},
func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	s := *((*promql.Series)(ptr))
	stream.WriteObjectStart()
	stream.WriteObjectField(`metric`)
	m, err := s.Metric.MarshalJSON()
	if err != nil {
		stream.Error = err
		return
	}
	stream.SetBuffer(append(stream.Buffer(), m...))

	// We make two passes through the series here: In the first marshaling
	// all value points, in the second marshaling all histogram
	// points. That's probably cheaper than just one pass in which we copy
	// out histogram Points into a newly allocated slice for separate
	// marshaling. (Could be benchmarked, though.)
	var foundValue, foundHistogram bool
	for _, p := range s.Points {
		if p.H == nil {
			stream.WriteMore()
			if !foundValue {
				stream.WriteObjectField(`values`)
				stream.WriteArrayStart()
			}
			foundValue = true
			marshalPointJSON(unsafe.Pointer(&p), stream)
		} else {
			foundHistogram = true
		}
	}
	if foundValue {
		stream.WriteArrayEnd()
	}
	if foundHistogram {
		firstHistogram := true
		for _, p := range s.Points {
			if p.H != nil {
				stream.WriteMore()
				if firstHistogram {
					stream.WriteObjectField(`histograms`)
					stream.WriteArrayStart()
				}
				firstHistogram = false
				marshalPointJSON(unsafe.Pointer(&p), stream)
			}
		}
		stream.WriteArrayEnd()
	}
	stream.WriteObjectEnd()
}

func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}

// marshalSampleJSON writes something like the following for normal value samples:
//
//	{
//	   "metric" : {
//	      "__name__" : "up",
//	      "job" : "prometheus",
//	      "instance" : "localhost:9090"
//	   },
//	   "value": [ 1435781451.781, "1" ]
//	},
//
// For histogram samples, it writes something like this:
//
//	{
//	   "metric" : {
//	      "__name__" : "up",
//	      "job" : "prometheus",
//	      "instance" : "localhost:9090"
//	   },
//	   "histogram": [ 1435781451.781, { < histogram, see below > } ]
//	},
func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	s := *((*promql.Sample)(ptr))
	stream.WriteObjectStart()
	stream.WriteObjectField(`metric`)
	m, err := s.Metric.MarshalJSON()
	if err != nil {
		stream.Error = err
		return
	}
	stream.SetBuffer(append(stream.Buffer(), m...))
	stream.WriteMore()
	if s.Point.H == nil {
		stream.WriteObjectField(`value`)
	} else {
		stream.WriteObjectField(`histogram`)
	}
	marshalPointJSON(unsafe.Pointer(&s.Point), stream)
	stream.WriteObjectEnd()
}

func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}

// marshalPointJSON writes `[ts, "val"]`.
func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*promql.Point)(ptr))
	stream.WriteArrayStart()
	jsonutil.MarshalTimestamp(p.T, stream)
	stream.WriteMore()
	if p.H == nil {
		jsonutil.MarshalValue(p.V, stream)
	} else {
		marshalHistogram(p.H, stream)
	}
	stream.WriteArrayEnd()
}

func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}

// marshalHistogramJSON writes something like:
//
//	{
//	    "count": "42",
//	    "sum": "34593.34",
//	    "buckets": [
//	      [ 3, "-0.25", "0.25", "3"],
//	      [ 0, "0.25", "0.5", "12"],
//	      [ 0, "0.5", "1", "21"],
//	      [ 0, "2", "4", "6"]
//	    ]
//	}
//
// The 1st element in each bucket array determines if the boundaries are
// inclusive (AKA closed) or exclusive (AKA open):
//
//	0: lower exclusive, upper inclusive
//	1: lower inclusive, upper exclusive
//	2: both exclusive
//	3: both inclusive
//
// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is
// the bucket count.
func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) {
	stream.WriteObjectStart()
	stream.WriteObjectField(`count`)
	jsonutil.MarshalValue(h.Count, stream)
	stream.WriteMore()
	stream.WriteObjectField(`sum`)
	jsonutil.MarshalValue(h.Sum, stream)

	bucketFound := false
	it := h.AllBucketIterator()
	for it.Next() {
		bucket := it.At()
		if bucket.Count == 0 {
			continue // No need to expose empty buckets in JSON.
		}
		stream.WriteMore()
		if !bucketFound {
			stream.WriteObjectField(`buckets`)
			stream.WriteArrayStart()
		}
		bucketFound = true
		boundaries := 2 // Exclusive on both sides AKA open interval.
		if bucket.LowerInclusive {
			if bucket.UpperInclusive {
				boundaries = 3 // Inclusive on both sides AKA closed interval.
			} else {
				boundaries = 1 // Inclusive only on lower end AKA right open.
			}
		} else {
			if bucket.UpperInclusive {
				boundaries = 0 // Inclusive only on upper end AKA left open.
			}
		}
		stream.WriteArrayStart()
		stream.WriteInt(boundaries)
		stream.WriteMore()
		jsonutil.MarshalValue(bucket.Lower, stream)
		stream.WriteMore()
		jsonutil.MarshalValue(bucket.Upper, stream)
		stream.WriteMore()
		jsonutil.MarshalValue(bucket.Count, stream)
		stream.WriteArrayEnd()
	}
	if bucketFound {
		stream.WriteArrayEnd()
	}
	stream.WriteObjectEnd()
}

// marshalExemplarJSON writes.
//
//	{
//	   labels: <labels>,
//	   value: "<string>",
//	   timestamp: <float>
//	}
func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*exemplar.Exemplar)(ptr))
	stream.WriteObjectStart()

	// "labels" key.
	stream.WriteObjectField(`labels`)
	lbls, err := p.Labels.MarshalJSON()
	if err != nil {
		stream.Error = err
		return
	}
	stream.SetBuffer(append(stream.Buffer(), lbls...))

	// "value" key.
	stream.WriteMore()
	stream.WriteObjectField(`value`)
	jsonutil.MarshalValue(p.Value, stream)

	// "timestamp" key.
	stream.WriteMore()
	stream.WriteObjectField(`timestamp`)
	jsonutil.MarshalTimestamp(p.Ts, stream)

	stream.WriteObjectEnd()
}

func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
	return false
}
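The bucket arrays documented above for marshalHistogram are positional, [boundaries, lower, upper, count], with everything except the flag encoded as a string. A hedged sketch of how a consumer might decode one entry; the jsonBucket type and its helpers are illustrative and not part of the Prometheus API:

package main

import (
	"encoding/json"
	"fmt"
)

// jsonBucket mirrors one entry of the "buckets" array written by
// marshalHistogram: [boundaries, lower, upper, count].
type jsonBucket struct {
	Boundaries int    // 0..3, encodes which ends of the interval are inclusive.
	Lower      string // Lower boundary, formatted as a string like all sample values.
	Upper      string // Upper boundary.
	Count      string // Number of observations in the bucket.
}

// UnmarshalJSON decodes the positional array into the named fields.
func (b *jsonBucket) UnmarshalJSON(data []byte) error {
	raw := []interface{}{&b.Boundaries, &b.Lower, &b.Upper, &b.Count}
	return json.Unmarshal(data, &raw)
}

// inclusive translates the boundaries flag into (lowerInclusive, upperInclusive).
func (b jsonBucket) inclusive() (lower, upper bool) {
	return b.Boundaries == 1 || b.Boundaries == 3, b.Boundaries == 0 || b.Boundaries == 3
}

func main() {
	var bucket jsonBucket
	if err := json.Unmarshal([]byte(`[0, "0.25", "0.5", "12"]`), &bucket); err != nil {
		panic(err)
	}
	lo, up := bucket.inclusive()
	fmt.Printf("lower=%s upper=%s lowerInclusive=%v upperInclusive=%v count=%s\n",
		bucket.Lower, bucket.Upper, lo, up, bucket.Count)
}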
web/api/v1/json_codec_test.go (new file, 178 lines)

@@ -0,0 +1,178 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import (
	"math"
	"testing"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func TestJsonCodec_Encode(t *testing.T) {
	cases := []struct {
		response interface{}
		expected string
	}{
		{
			response: &queryData{
				ResultType: parser.ValueTypeMatrix,
				Result: promql.Matrix{
					promql.Series{
						Points: []promql.Point{{V: 1, T: 1000}},
						Metric: labels.FromStrings("__name__", "foo"),
					},
				},
			},
			expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]}]}}`,
		},
		{
			response: &queryData{
				ResultType: parser.ValueTypeMatrix,
				Result: promql.Matrix{
					promql.Series{
						Points: []promql.Point{{H: &histogram.FloatHistogram{
							Schema:        2,
							ZeroThreshold: 0.001,
							ZeroCount:     12,
							Count:         10,
							Sum:           20,
							PositiveSpans: []histogram.Span{
								{Offset: 3, Length: 2},
								{Offset: 1, Length: 3},
							},
							NegativeSpans: []histogram.Span{
								{Offset: 2, Length: 2},
							},
							PositiveBuckets: []float64{1, 2, 2, 1, 1},
							NegativeBuckets: []float64{2, 1},
						}, T: 1000}},
						Metric: labels.FromStrings("__name__", "foo"),
					},
				},
			},
			expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"histograms":[[1,{"count":"10","sum":"20","buckets":[[1,"-1.6817928305074288","-1.414213562373095","1"],[1,"-1.414213562373095","-1.189207115002721","2"],[3,"-0.001","0.001","12"],[0,"1.414213562373095","1.6817928305074288","1"],[0,"1.6817928305074288","2","2"],[0,"2.378414230005442","2.82842712474619","2"],[0,"2.82842712474619","3.3635856610148576","1"],[0,"3.3635856610148576","4","1"]]}]]}]}}`,
		},
		{
			response: promql.Point{V: 0, T: 0},
			expected: `{"status":"success","data":[0,"0"]}`,
		},
		{
			response: promql.Point{V: 20, T: 1},
			expected: `{"status":"success","data":[0.001,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 10},
			expected: `{"status":"success","data":[0.010,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 100},
			expected: `{"status":"success","data":[0.100,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 1001},
			expected: `{"status":"success","data":[1.001,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 1010},
			expected: `{"status":"success","data":[1.010,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 1100},
			expected: `{"status":"success","data":[1.100,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: 12345678123456555},
			expected: `{"status":"success","data":[12345678123456.555,"20"]}`,
		},
		{
			response: promql.Point{V: 20, T: -1},
			expected: `{"status":"success","data":[-0.001,"20"]}`,
		},
		{
			response: promql.Point{V: math.NaN(), T: 0},
			expected: `{"status":"success","data":[0,"NaN"]}`,
		},
		{
			response: promql.Point{V: math.Inf(1), T: 0},
			expected: `{"status":"success","data":[0,"+Inf"]}`,
		},
		{
			response: promql.Point{V: math.Inf(-1), T: 0},
			expected: `{"status":"success","data":[0,"-Inf"]}`,
		},
		{
			response: promql.Point{V: 1.2345678e6, T: 0},
			expected: `{"status":"success","data":[0,"1234567.8"]}`,
		},
		{
			response: promql.Point{V: 1.2345678e-6, T: 0},
			expected: `{"status":"success","data":[0,"0.0000012345678"]}`,
		},
		{
			response: promql.Point{V: 1.2345678e-67, T: 0},
			expected: `{"status":"success","data":[0,"1.2345678e-67"]}`,
		},
		{
			response: []exemplar.QueryResult{
				{
					SeriesLabels: labels.FromStrings("foo", "bar"),
					Exemplars: []exemplar.Exemplar{
						{
							Labels: labels.FromStrings("traceID", "abc"),
							Value:  100.123,
							Ts:     1234,
						},
					},
				},
			},
			expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"100.123","timestamp":1.234}]}]}`,
		},
		{
			response: []exemplar.QueryResult{
				{
					SeriesLabels: labels.FromStrings("foo", "bar"),
					Exemplars: []exemplar.Exemplar{
						{
							Labels: labels.FromStrings("traceID", "abc"),
							Value:  math.Inf(1),
							Ts:     1234,
						},
					},
				},
			},
			expected: `{"status":"success","data":[{"seriesLabels":{"foo":"bar"},"exemplars":[{"labels":{"traceID":"abc"},"value":"+Inf","timestamp":1.234}]}]}`,
		},
	}

	codec := JSONCodec{}

	for _, c := range cases {
		body, err := codec.Encode(&Response{
			Status: statusSuccess,
			Data:   c.response,
		})
		if err != nil {
			t.Fatalf("Error encoding response body: %s", err)
		}

		if string(body) != c.expected {
			t.Fatalf("Expected response \n%v\n but got \n%v\n", c.expected, string(body))
		}
	}
}