// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import (
    "context"
    "fmt"
    "math"
    "math/rand"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "regexp"
    "sort"
    "strconv"
    "time"
    "unsafe"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    jsoniter "github.com/json-iterator/go"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
    "github.com/prometheus/common/route"
    tsdbLabels "github.com/prometheus/prometheus/tsdb/labels"

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/pkg/gate"
    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/prometheus/prometheus/pkg/textparse"
    "github.com/prometheus/prometheus/pkg/timestamp"
    "github.com/prometheus/prometheus/prompb"
    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/rules"
    "github.com/prometheus/prometheus/scrape"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/storage/remote"
    "github.com/prometheus/prometheus/util/httputil"
    "github.com/prometheus/prometheus/util/stats"
)

const (
    namespace = "prometheus"
    subsystem = "api"
)
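
// status is the top-level result state ("success" or "error") reported in
// every v1 API response.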
type status string

const (
    statusSuccess status = "success"
    statusError   status = "error"
)

type errorType string

const (
    errorNone        errorType = ""
    errorTimeout     errorType = "timeout"
    errorCanceled    errorType = "canceled"
    errorExec        errorType = "execution"
    errorBadData     errorType = "bad_data"
    errorInternal    errorType = "internal"
    errorUnavailable errorType = "unavailable"
    errorNotFound    errorType = "not_found"
)

var remoteReadQueries = prometheus.NewGauge(prometheus.GaugeOpts{
    Namespace: namespace,
    Subsystem: subsystem,
    Name:      "remote_read_queries",
    Help:      "The current number of remote read queries being executed or waiting.",
})
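
// apiError couples an errorType with the underlying error so that handlers can
// report both a machine-readable category and a human-readable message.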
type apiError struct {
    typ errorType
    err error
}

func (e *apiError) Error() string {
    return fmt.Sprintf("%s: %s", e.typ, e.err)
}

type targetRetriever interface {
    TargetsActive() map[string][]*scrape.Target
    TargetsDropped() map[string][]*scrape.Target
}

type alertmanagerRetriever interface {
    Alertmanagers() []*url.URL
    DroppedAlertmanagers() []*url.URL
}

type rulesRetriever interface {
    RuleGroups() []*rules.Group
    AlertingRules() []*rules.AlertingRule
}
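
// response is the JSON envelope wrapping every v1 API result.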
type response struct {
    Status    status      `json:"status"`
    Data      interface{} `json:"data,omitempty"`
    ErrorType errorType   `json:"errorType,omitempty"`
    Error     string      `json:"error,omitempty"`
    Warnings  []string    `json:"warnings,omitempty"`
}
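
// apiFuncResult bundles the data, error, warnings and an optional finalizer
// returned by an endpoint handler.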
type apiFuncResult struct {
    data      interface{}
    err       *apiError
    warnings  storage.Warnings
    finalizer func()
}

type apiFunc func(r *http.Request) apiFuncResult

// TSDBAdmin defines the tsdb interfaces used by the v1 API for admin operations.
type TSDBAdmin interface {
    CleanTombstones() error
    Delete(mint, maxt int64, ms ...tsdbLabels.Matcher) error
    Dir() string
    Snapshot(dir string, withHead bool) error
}

// API can register a set of endpoints in a router and handle
// them using the provided storage and query engine.
type API struct {
    Queryable   storage.Queryable
    QueryEngine *promql.Engine

    targetRetriever       targetRetriever
    alertmanagerRetriever alertmanagerRetriever
    rulesRetriever        rulesRetriever
    now                   func() time.Time
    config                func() config.Config
    flagsMap              map[string]string
    ready                 func(http.HandlerFunc) http.HandlerFunc

    db                        func() TSDBAdmin
    enableAdmin               bool
    logger                    log.Logger
    remoteReadSampleLimit     int
    remoteReadMaxBytesInFrame int
    remoteReadGate            *gate.Gate
    CORSOrigin                *regexp.Regexp
}

func init() {
    jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
    prometheus.MustRegister(remoteReadQueries)
}

// NewAPI returns an initialized API type.
func NewAPI(
    qe *promql.Engine,
    q storage.Queryable,
    tr targetRetriever,
    ar alertmanagerRetriever,
    configFunc func() config.Config,
    flagsMap map[string]string,
    readyFunc func(http.HandlerFunc) http.HandlerFunc,
    db func() TSDBAdmin,
    enableAdmin bool,
    logger log.Logger,
    rr rulesRetriever,
    remoteReadSampleLimit int,
    remoteReadConcurrencyLimit int,
    remoteReadMaxBytesInFrame int,
    CORSOrigin *regexp.Regexp,
) *API {
    return &API{
        QueryEngine:           qe,
        Queryable:             q,
        targetRetriever:       tr,
        alertmanagerRetriever: ar,

        now:                       time.Now,
        config:                    configFunc,
        flagsMap:                  flagsMap,
        ready:                     readyFunc,
        db:                        db,
        enableAdmin:               enableAdmin,
        rulesRetriever:            rr,
        remoteReadSampleLimit:     remoteReadSampleLimit,
        remoteReadGate:            gate.New(remoteReadConcurrencyLimit),
        remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame,
        logger:                    logger,
        CORSOrigin:                CORSOrigin,
    }
}

// Register the API's endpoints in the given router.
func (api *API) Register(r *route.Router) {
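    // wrap adapts an apiFunc into an http.HandlerFunc: it sets CORS headers,
    // converts the apiFuncResult into a JSON response (or an error response),
    // runs any finalizer, and gates the handler behind readiness and
    // response compression.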
    wrap := func(f apiFunc) http.HandlerFunc {
        hf := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            httputil.SetCORS(w, api.CORSOrigin, r)
            result := f(r)
            if result.err != nil {
                api.respondError(w, result.err, result.data)
            } else if result.data != nil {
                api.respond(w, result.data, result.warnings)
            } else {
                w.WriteHeader(http.StatusNoContent)
            }
            if result.finalizer != nil {
                result.finalizer()
            }
        })
        return api.ready(httputil.CompressionHandler{
            Handler: hf,
        }.ServeHTTP)
    }

    r.Options("/*path", wrap(api.options))

    r.Get("/query", wrap(api.query))
    r.Post("/query", wrap(api.query))
    r.Get("/query_range", wrap(api.queryRange))
    r.Post("/query_range", wrap(api.queryRange))

    r.Get("/labels", wrap(api.labelNames))
    r.Post("/labels", wrap(api.labelNames))
    r.Get("/label/:name/values", wrap(api.labelValues))

    r.Get("/series", wrap(api.series))
    r.Post("/series", wrap(api.series))
    r.Del("/series", wrap(api.dropSeries))

    r.Get("/targets", wrap(api.targets))
    r.Get("/targets/metadata", wrap(api.targetMetadata))
    r.Get("/alertmanagers", wrap(api.alertmanagers))

    r.Get("/status/config", wrap(api.serveConfig))
    r.Get("/status/flags", wrap(api.serveFlags))
    r.Post("/read", api.ready(http.HandlerFunc(api.remoteRead)))

    r.Get("/alerts", wrap(api.alerts))
    r.Get("/rules", wrap(api.rules))

    // Admin APIs
    r.Post("/admin/tsdb/delete_series", wrap(api.deleteSeries))
    r.Post("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones))
    r.Post("/admin/tsdb/snapshot", wrap(api.snapshot))

    r.Put("/admin/tsdb/delete_series", wrap(api.deleteSeries))
    r.Put("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones))
    r.Put("/admin/tsdb/snapshot", wrap(api.snapshot))
}
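
// queryData is the data payload returned by the query and query_range
// endpoints.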
type queryData struct {
    ResultType promql.ValueType  `json:"resultType"`
    Result     promql.Value      `json:"result"`
    Stats      *stats.QueryStats `json:"stats,omitempty"`
}

func (api *API) options(r *http.Request) apiFuncResult {
    return apiFuncResult{nil, nil, nil, nil}
}

func (api *API) query(r *http.Request) apiFuncResult {
    var ts time.Time
    if t := r.FormValue("time"); t != "" {
        var err error
        ts, err = parseTime(t)
        if err != nil {
            err = errors.Wrapf(err, "invalid parameter 'time'")
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }
    } else {
        ts = api.now()
    }

    ctx := r.Context()
    if to := r.FormValue("timeout"); to != "" {
        var cancel context.CancelFunc
        timeout, err := parseDuration(to)
        if err != nil {
            err = errors.Wrapf(err, "invalid parameter 'timeout'")
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }

        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }

    qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts)
    if err != nil {
        err = errors.Wrapf(err, "invalid parameter 'query'")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    res := qry.Exec(ctx)
    if res.Err != nil {
        return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
    }

    // Optional stats field in response if parameter "stats" is not empty.
    var qs *stats.QueryStats
    if r.FormValue("stats") != "" {
        qs = stats.NewQueryStats(qry.Stats())
    }

    return apiFuncResult{&queryData{
        ResultType: res.Value.Type(),
        Result:     res.Value,
        Stats:      qs,
    }, nil, res.Warnings, qry.Close}
}

func (api *API) queryRange(r *http.Request) apiFuncResult {
    start, err := parseTime(r.FormValue("start"))
    if err != nil {
        err = errors.Wrapf(err, "invalid parameter 'start'")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }
    end, err := parseTime(r.FormValue("end"))
    if err != nil {
        err = errors.Wrapf(err, "invalid parameter 'end'")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }
    if end.Before(start) {
        err := errors.New("end timestamp must not be before start time")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    step, err := parseDuration(r.FormValue("step"))
    if err != nil {
        err = errors.Wrapf(err, "invalid parameter 'step'")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    if step <= 0 {
        err := errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    // For safety, limit the number of returned points per timeseries.
    // This is sufficient for 60s resolution for a week or 1h resolution for a year.
    if end.Sub(start)/step > 11000 {
        err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    ctx := r.Context()
    if to := r.FormValue("timeout"); to != "" {
        var cancel context.CancelFunc
        timeout, err := parseDuration(to)
        if err != nil {
            err = errors.Wrap(err, "invalid parameter 'timeout'")
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }

        ctx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }

    qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, r.FormValue("query"), start, end, step)
    if err != nil {
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    res := qry.Exec(ctx)
    if res.Err != nil {
        return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
    }

    // Optional stats field in response if parameter "stats" is not empty.
    var qs *stats.QueryStats
    if r.FormValue("stats") != "" {
        qs = stats.NewQueryStats(qry.Stats())
    }

    return apiFuncResult{&queryData{
        ResultType: res.Value.Type(),
        Result:     res.Value,
        Stats:      qs,
    }, nil, res.Warnings, qry.Close}
}
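
// returnAPIError maps PromQL engine errors onto the API's error types so that
// the appropriate HTTP status code is returned.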
func returnAPIError(err error) *apiError {
    if err == nil {
        return nil
    }

    switch err.(type) {
    case promql.ErrQueryCanceled:
        return &apiError{errorCanceled, err}
    case promql.ErrQueryTimeout:
        return &apiError{errorTimeout, err}
    case promql.ErrStorage:
        return &apiError{errorInternal, err}
    }

    return &apiError{errorExec, err}
}

func (api *API) labelNames(r *http.Request) apiFuncResult {
    q, err := api.Queryable.Querier(r.Context(), math.MinInt64, math.MaxInt64)
    if err != nil {
        return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
    }
    defer q.Close()

    names, warnings, err := q.LabelNames()
    if err != nil {
        return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
    }
    return apiFuncResult{names, nil, warnings, nil}
}

func (api *API) labelValues(r *http.Request) apiFuncResult {
    ctx := r.Context()
    name := route.Param(ctx, "name")

    if !model.LabelNameRE.MatchString(name) {
        return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil}
    }
    q, err := api.Queryable.Querier(ctx, math.MinInt64, math.MaxInt64)
    if err != nil {
        return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
    }
    defer q.Close()

    vals, warnings, err := q.LabelValues(name)
    if err != nil {
        return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
    }

    return apiFuncResult{vals, nil, warnings, nil}
}
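
// minTime and maxTime are the extreme bounds substituted when a request omits
// start or end; the offsets keep the values representable as time.Time.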
var (
    minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
    maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()

    minTimeFormatted = minTime.Format(time.RFC3339Nano)
    maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
)

func (api *API) series(r *http.Request) apiFuncResult {
    if err := r.ParseForm(); err != nil {
        return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil}
    }
    if len(r.Form["match[]"]) == 0 {
        return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
    }

    var start time.Time
    if t := r.FormValue("start"); t != "" {
        var err error
        start, err = parseTime(t)
        if err != nil {
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }
    } else {
        start = minTime
    }

    var end time.Time
    if t := r.FormValue("end"); t != "" {
        var err error
        end, err = parseTime(t)
        if err != nil {
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }
    } else {
        end = maxTime
    }

    var matcherSets [][]*labels.Matcher
    for _, s := range r.Form["match[]"] {
        matchers, err := promql.ParseMetricSelector(s)
        if err != nil {
            return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
        }
        matcherSets = append(matcherSets, matchers)
    }

    q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end))
    if err != nil {
        return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
    }
    defer q.Close()

    var sets []storage.SeriesSet
    var warnings storage.Warnings
    for _, mset := range matcherSets {
        s, wrn, err := q.Select(nil, mset...) //TODO
        warnings = append(warnings, wrn...)
        if err != nil {
            return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
        }
        sets = append(sets, s)
    }

    set := storage.NewMergeSeriesSet(sets, nil)
    metrics := []labels.Labels{}
    for set.Next() {
        metrics = append(metrics, set.At().Labels())
    }
    if set.Err() != nil {
        return apiFuncResult{nil, &apiError{errorExec, set.Err()}, warnings, nil}
    }

    return apiFuncResult{metrics, nil, warnings, nil}
}

func (api *API) dropSeries(r *http.Request) apiFuncResult {
    return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil}
}

// Target has the information for one target.
type Target struct {
    // Labels before any processing.
    DiscoveredLabels map[string]string `json:"discoveredLabels"`
    // Any labels that are added to this target and its metrics.
    Labels map[string]string `json:"labels"`

    ScrapeURL string `json:"scrapeUrl"`

    LastError  string              `json:"lastError"`
    LastScrape time.Time           `json:"lastScrape"`
    Health     scrape.TargetHealth `json:"health"`
}

// DroppedTarget has the information for one target that was dropped during relabelling.
type DroppedTarget struct {
    // Labels before any processing.
    DiscoveredLabels map[string]string `json:"discoveredLabels"`
}

// TargetDiscovery has all the active targets.
type TargetDiscovery struct {
    ActiveTargets  []*Target        `json:"activeTargets"`
    DroppedTargets []*DroppedTarget `json:"droppedTargets"`
}

func (api *API) targets(r *http.Request) apiFuncResult {
    flatten := func(targets map[string][]*scrape.Target) []*scrape.Target {
        var n int
        keys := make([]string, 0, len(targets))
        for k := range targets {
            keys = append(keys, k)
            n += len(targets[k])
        }
        sort.Strings(keys)
        res := make([]*scrape.Target, 0, n)
        for _, k := range keys {
            res = append(res, targets[k]...)
        }
        return res
    }

    tActive := flatten(api.targetRetriever.TargetsActive())
    tDropped := flatten(api.targetRetriever.TargetsDropped())
    res := &TargetDiscovery{ActiveTargets: make([]*Target, 0, len(tActive)), DroppedTargets: make([]*DroppedTarget, 0, len(tDropped))}

    for _, target := range tActive {
        lastErrStr := ""
        lastErr := target.LastError()
        if lastErr != nil {
            lastErrStr = lastErr.Error()
        }

        res.ActiveTargets = append(res.ActiveTargets, &Target{
            DiscoveredLabels: target.DiscoveredLabels().Map(),
            Labels:           target.Labels().Map(),
            ScrapeURL:        target.URL().String(),
            LastError:        lastErrStr,
            LastScrape:       target.LastScrape(),
            Health:           target.Health(),
        })
    }

    for _, t := range tDropped {
        res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{
            DiscoveredLabels: t.DiscoveredLabels().Map(),
        })
    }
    return apiFuncResult{res, nil, nil, nil}
}
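
// matchLabels reports whether lset satisfies every matcher in matchers.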
func matchLabels(lset labels.Labels, matchers []*labels.Matcher) bool {
    for _, m := range matchers {
        if !m.Matches(lset.Get(m.Name)) {
            return false
        }
    }
    return true
}

func (api *API) targetMetadata(r *http.Request) apiFuncResult {
    limit := -1
    if s := r.FormValue("limit"); s != "" {
        var err error
        if limit, err = strconv.Atoi(s); err != nil {
            return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
        }
    }

    matchers, err := promql.ParseMetricSelector(r.FormValue("match_target"))
    if err != nil {
        return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
    }

    metric := r.FormValue("metric")

    var res []metricMetadata
    for _, tt := range api.targetRetriever.TargetsActive() {
        for _, t := range tt {
            if limit >= 0 && len(res) >= limit {
                break
            }
            // Filter targets that don't satisfy the label matchers.
            if !matchLabels(t.Labels(), matchers) {
                continue
            }
            // If no metric is specified, get the full list for the target.
            if metric == "" {
                for _, md := range t.MetadataList() {
                    res = append(res, metricMetadata{
                        Target: t.Labels(),
                        Metric: md.Metric,
                        Type:   md.Type,
                        Help:   md.Help,
                        Unit:   md.Unit,
                    })
                }
                continue
            }
            // Get metadata for the specified metric.
            if md, ok := t.Metadata(metric); ok {
                res = append(res, metricMetadata{
                    Target: t.Labels(),
                    Type:   md.Type,
                    Help:   md.Help,
                    Unit:   md.Unit,
                })
            }
        }
    }
    if len(res) == 0 {
        return apiFuncResult{nil, &apiError{errorNotFound, errors.New("specified metadata not found")}, nil, nil}
    }
    return apiFuncResult{res, nil, nil, nil}
}
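
// metricMetadata is a single metadata entry as returned by the
// targets/metadata endpoint.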
type metricMetadata struct {
    Target labels.Labels        `json:"target"`
    Metric string               `json:"metric,omitempty"`
    Type   textparse.MetricType `json:"type"`
    Help   string               `json:"help"`
    Unit   string               `json:"unit"`
}

// AlertmanagerDiscovery has all the active Alertmanagers.
type AlertmanagerDiscovery struct {
    ActiveAlertmanagers  []*AlertmanagerTarget `json:"activeAlertmanagers"`
    DroppedAlertmanagers []*AlertmanagerTarget `json:"droppedAlertmanagers"`
}

// AlertmanagerTarget has info on one AM.
type AlertmanagerTarget struct {
    URL string `json:"url"`
}

func (api *API) alertmanagers(r *http.Request) apiFuncResult {
    urls := api.alertmanagerRetriever.Alertmanagers()
    droppedURLS := api.alertmanagerRetriever.DroppedAlertmanagers()
    ams := &AlertmanagerDiscovery{ActiveAlertmanagers: make([]*AlertmanagerTarget, len(urls)), DroppedAlertmanagers: make([]*AlertmanagerTarget, len(droppedURLS))}
    for i, url := range urls {
        ams.ActiveAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
    }
    for i, url := range droppedURLS {
        ams.DroppedAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
    }
    return apiFuncResult{ams, nil, nil, nil}
}

// AlertDiscovery has info for all active alerts.
type AlertDiscovery struct {
    Alerts []*Alert `json:"alerts"`
}

// Alert has info for an alert.
type Alert struct {
    Labels      labels.Labels `json:"labels"`
    Annotations labels.Labels `json:"annotations"`
    State       string        `json:"state"`
    ActiveAt    *time.Time    `json:"activeAt,omitempty"`
    Value       string        `json:"value"`
}

func (api *API) alerts(r *http.Request) apiFuncResult {
    alertingRules := api.rulesRetriever.AlertingRules()
    alerts := []*Alert{}

    for _, alertingRule := range alertingRules {
        alerts = append(
            alerts,
            rulesAlertsToAPIAlerts(alertingRule.ActiveAlerts())...,
        )
    }

    res := &AlertDiscovery{Alerts: alerts}

    return apiFuncResult{res, nil, nil, nil}
}

func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert {
    apiAlerts := make([]*Alert, len(rulesAlerts))
    for i, ruleAlert := range rulesAlerts {
        apiAlerts[i] = &Alert{
            Labels:      ruleAlert.Labels,
            Annotations: ruleAlert.Annotations,
            State:       ruleAlert.State.String(),
            ActiveAt:    &ruleAlert.ActiveAt,
            Value:       strconv.FormatFloat(ruleAlert.Value, 'e', -1, 64),
        }
    }

    return apiAlerts
}

// RuleDiscovery has info for all rules
type RuleDiscovery struct {
    RuleGroups []*RuleGroup `json:"groups"`
}

// RuleGroup has info for rules which are part of a group
type RuleGroup struct {
    Name string `json:"name"`
    File string `json:"file"`
    // In order to preserve rule ordering, while exposing type (alerting or recording)
    // specific properties, both alerting and recording rules are exposed in the
    // same array.
    Rules    []rule  `json:"rules"`
    Interval float64 `json:"interval"`
}

type rule interface{}

type alertingRule struct {
    Name        string           `json:"name"`
    Query       string           `json:"query"`
    Duration    float64          `json:"duration"`
    Labels      labels.Labels    `json:"labels"`
    Annotations labels.Labels    `json:"annotations"`
    Alerts      []*Alert         `json:"alerts"`
    Health      rules.RuleHealth `json:"health"`
    LastError   string           `json:"lastError,omitempty"`
    // Type of an alertingRule is always "alerting".
    Type string `json:"type"`
}

type recordingRule struct {
    Name      string           `json:"name"`
    Query     string           `json:"query"`
    Labels    labels.Labels    `json:"labels,omitempty"`
    Health    rules.RuleHealth `json:"health"`
    LastError string           `json:"lastError,omitempty"`
    // Type of a recordingRule is always "recording".
    Type string `json:"type"`
}

func (api *API) rules(r *http.Request) apiFuncResult {
    ruleGroups := api.rulesRetriever.RuleGroups()
    res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))}
    for i, grp := range ruleGroups {
        apiRuleGroup := &RuleGroup{
            Name:     grp.Name(),
            File:     grp.File(),
            Interval: grp.Interval().Seconds(),
            Rules:    []rule{},
        }

        for _, r := range grp.Rules() {
            var enrichedRule rule

            lastError := ""
            if r.LastError() != nil {
                lastError = r.LastError().Error()
            }

            switch rule := r.(type) {
            case *rules.AlertingRule:
                enrichedRule = alertingRule{
                    Name:        rule.Name(),
                    Query:       rule.Query().String(),
                    Duration:    rule.Duration().Seconds(),
                    Labels:      rule.Labels(),
                    Annotations: rule.Annotations(),
                    Alerts:      rulesAlertsToAPIAlerts(rule.ActiveAlerts()),
                    Health:      rule.Health(),
                    LastError:   lastError,
                    Type:        "alerting",
                }
            case *rules.RecordingRule:
                enrichedRule = recordingRule{
                    Name:      rule.Name(),
                    Query:     rule.Query().String(),
                    Labels:    rule.Labels(),
                    Health:    rule.Health(),
                    LastError: lastError,
                    Type:      "recording",
                }
            default:
                err := errors.Errorf("failed to assert type of rule '%v'", rule.Name())
                return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
            }

            apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule)
        }
        res.RuleGroups[i] = apiRuleGroup
    }
    return apiFuncResult{res, nil, nil, nil}
}
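
// prometheusConfig wraps the currently loaded configuration as YAML for the
// status/config endpoint.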
type prometheusConfig struct {
    YAML string `json:"yaml"`
}

func (api *API) serveConfig(r *http.Request) apiFuncResult {
    cfg := &prometheusConfig{
        YAML: api.config().String(),
    }
    return apiFuncResult{cfg, nil, nil, nil}
}

func (api *API) serveFlags(r *http.Request) apiFuncResult {
    return apiFuncResult{api.flagsMap, nil, nil, nil}
}
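
// remoteRead implements the remote read endpoint. Depending on the response
// type negotiated with the client it either streams chunked responses or
// falls back to a single snappy-compressed protobuf response of raw samples.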
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    if err := api.remoteReadGate.Start(ctx); err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    remoteReadQueries.Inc()

    defer api.remoteReadGate.Done()
    defer remoteReadQueries.Dec()

    req, err := remote.DecodeReadRequest(r)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    externalLabels := api.config().GlobalConfig.ExternalLabels.Map()

    sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels))
    for name, value := range externalLabels {
        sortedExternalLabels = append(sortedExternalLabels, prompb.Label{
            Name:  string(name),
            Value: string(value),
        })
    }
    sort.Slice(sortedExternalLabels, func(i, j int) bool {
        return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
    })

    responseType, err := remote.NegotiateResponseType(req.AcceptedResponseTypes)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    switch responseType {
    case prompb.ReadRequest_STREAMED_XOR_CHUNKS:
        w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")

        f, ok := w.(http.Flusher)
        if !ok {
            http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError)
            return
        }
        for i, query := range req.Queries {
            err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {
                return remote.StreamChunkedReadResponses(
                    remote.NewChunkedWriter(w, f),
                    int64(i),
                    set,
                    sortedExternalLabels,
                    api.remoteReadMaxBytesInFrame,
                )
            })
            if err != nil {
                if httpErr, ok := err.(remote.HTTPError); ok {
                    http.Error(w, httpErr.Error(), httpErr.Status())
                    return
                }
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
        }
    default:
        w.Header().Set("Content-Type", "application/x-protobuf")
        w.Header().Set("Content-Encoding", "snappy")

        // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response.
        resp := prompb.ReadResponse{
            Results: make([]*prompb.QueryResult, len(req.Queries)),
        }
        for i, query := range req.Queries {
            err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {
                resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit)
                if err != nil {
                    return err
                }

                for _, ts := range resp.Results[i].Timeseries {
                    ts.Labels = remote.MergeLabels(ts.Labels, sortedExternalLabels)
                }
                return nil
            })
            if err != nil {
                if httpErr, ok := err.(remote.HTTPError); ok {
                    http.Error(w, httpErr.Error(), httpErr.Status())
                    return
                }
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
        }

        if err := remote.EncodeReadResponse(&resp, w); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
    }
}

// filterExtLabelsFromMatchers changes equality matchers which match external labels
// to matchers that look for an empty label,
// as that label should not be present in the storage.
func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) {
    matchers, err := remote.FromLabelMatchers(pbMatchers)
    if err != nil {
        return nil, err
    }

    filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
    for _, m := range matchers {
        value := externalLabels[m.Name]
        if m.Type == labels.MatchEqual && value == m.Value {
            matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
            if err != nil {
                return nil, err
            }
            filteredMatchers = append(filteredMatchers, matcher)
        } else {
            filteredMatchers = append(filteredMatchers, m)
        }
    }

    return filteredMatchers, nil
}
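
// remoteReadQuery runs a single remote read query against the storage and
// passes the resulting series set to seriesHandleFn.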
func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, externalLabels map[string]string, seriesHandleFn func(set storage.SeriesSet) error) error {
    filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
    if err != nil {
        return err
    }

    querier, err := api.Queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs)
    if err != nil {
        return err
    }

    var selectParams *storage.SelectParams
    if query.Hints != nil {
        selectParams = &storage.SelectParams{
            Start: query.Hints.StartMs,
            End:   query.Hints.EndMs,
            Step:  query.Hints.StepMs,
            Func:  query.Hints.Func,
        }
    }

    defer func() {
        if err := querier.Close(); err != nil {
            level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error())
        }
    }()

    set, _, err := querier.Select(selectParams, filteredMatchers...)
    if err != nil {
        return err
    }
    return seriesHandleFn(set)
}
|
|
|
|
|
2018-11-30 06:27:12 -08:00
|
|
|
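// deleteSeries handles the admin endpoint for deleting series. It requires the
// admin APIs to be enabled and the TSDB to be ready, and deletes all data
// matching the given match[] selectors within the optional start/end time
// range (defaulting to all time).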
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	db := api.db()
	if db == nil {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
	}

	if err := r.ParseForm(); err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "error parsing form values")}, nil, nil}
	}
	if len(r.Form["match[]"]) == 0 {
		return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
	}

	var start time.Time
	if t := r.FormValue("start"); t != "" {
		var err error
		start, err = parseTime(t)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
	} else {
		start = minTime
	}

	var end time.Time
	if t := r.FormValue("end"); t != "" {
		var err error
		end, err = parseTime(t)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
	} else {
		end = maxTime
	}

	for _, s := range r.Form["match[]"] {
		matchers, err := promql.ParseMetricSelector(s)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}

		var selector tsdbLabels.Selector
		for _, m := range matchers {
			selector = append(selector, convertMatcher(m))
		}

		if err := db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), selector...); err != nil {
			return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
		}
	}

	return apiFuncResult{nil, nil, nil, nil}
}

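// snapshot handles the admin endpoint for snapshotting the TSDB. It creates a
// snapshot of all current data under <data-dir>/snapshots/<timestamp>-<random>
// and returns the snapshot directory name; the skip_head form value controls
// whether data in the head block is excluded.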
func (api *API) snapshot(r *http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	var (
		skipHead bool
		err      error
	)
	if r.FormValue("skip_head") != "" {
		skipHead, err = strconv.ParseBool(r.FormValue("skip_head"))
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "unable to parse boolean 'skip_head' argument")}, nil, nil}
		}
	}

	db := api.db()
	if db == nil {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
	}

	var (
		snapdir = filepath.Join(db.Dir(), "snapshots")
		name    = fmt.Sprintf("%s-%x",
			time.Now().UTC().Format("20060102T150405Z0700"),
			rand.Int())
		dir = filepath.Join(snapdir, name)
	)
	if err := os.MkdirAll(dir, 0777); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil}
	}
	if err := db.Snapshot(dir, !skipHead); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot")}, nil, nil}
	}

	return apiFuncResult{struct {
		Name string `json:"name"`
	}{name}, nil, nil, nil}
}

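// cleanTombstones handles the admin endpoint that removes deleted data from
// disk and cleans up the existing tombstones. It requires the admin APIs to be
// enabled and the TSDB to be ready.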
func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	db := api.db()
	if db == nil {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
	}

	if err := db.CleanTombstones(); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
	}

	return apiFuncResult{nil, nil, nil, nil}
}

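// convertMatcher converts a PromQL label matcher into its TSDB equivalent.
// Regular expression matchers are fully anchored. The matchers handled here
// come from parsed selectors, so an invalid regexp or matcher type indicates a
// programming error and causes a panic.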
func convertMatcher(m *labels.Matcher) tsdbLabels.Matcher {
	switch m.Type {
	case labels.MatchEqual:
		return tsdbLabels.NewEqualMatcher(m.Name, m.Value)

	case labels.MatchNotEqual:
		return tsdbLabels.Not(tsdbLabels.NewEqualMatcher(m.Name, m.Value))

	case labels.MatchRegexp:
		res, err := tsdbLabels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$")
		if err != nil {
			panic(err)
		}
		return res

	case labels.MatchNotRegexp:
		res, err := tsdbLabels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$")
		if err != nil {
			panic(err)
		}
		return tsdbLabels.Not(res)
	}
	panic("storage.convertMatcher: invalid matcher type")
}

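// respond writes data as a JSON success response, including any accumulated
// warnings as strings, and logs (but does not fail on) write errors.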
func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) {
	statusMessage := statusSuccess
	var warningStrings []string
	for _, warning := range warnings {
		warningStrings = append(warningStrings, warning.Error())
	}
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	b, err := json.Marshal(&response{
		Status:   statusMessage,
		Data:     data,
		Warnings: warningStrings,
	})
	if err != nil {
		level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if n, err := w.Write(b); err != nil {
		level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
	}
}

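// respondError writes apiErr as a JSON error response, mapping the error type
// to an HTTP status code (e.g. errorBadData to 400, errorExec to 422) and
// falling back to 500 for unknown types.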
func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	b, err := json.Marshal(&response{
		Status:    statusError,
		ErrorType: apiErr.typ,
		Error:     apiErr.err.Error(),
		Data:      data,
	})
	if err != nil {
		level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var code int
	switch apiErr.typ {
	case errorBadData:
		code = http.StatusBadRequest
	case errorExec:
		code = 422
	case errorCanceled, errorTimeout:
		code = http.StatusServiceUnavailable
	case errorInternal:
		code = http.StatusInternalServerError
	case errorNotFound:
		code = http.StatusNotFound
	default:
		code = http.StatusInternalServerError
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	if n, err := w.Write(b); err != nil {
		level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
	}
}

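// parseTime parses either a (possibly fractional) Unix timestamp in seconds or
// an RFC 3339 timestamp, plus the special minTime/maxTime boundary values.
// Illustrative inputs (not taken from a real request):
//
//	parseTime("1570689600.781")
//	parseTime("2019-10-10T08:00:00.781Z")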
func parseTime(s string) (time.Time, error) {
	if t, err := strconv.ParseFloat(s, 64); err == nil {
		s, ns := math.Modf(t)
		ns = math.Round(ns*1000) / 1000
		return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
	}
	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
		return t, nil
	}

	// Stdlib's time parser can only handle 4 digit years. As a workaround until
	// that is fixed we want to at least support our own boundary times.
	// Context: https://github.com/prometheus/client_golang/issues/614
	// Upstream issue: https://github.com/golang/go/issues/20555
	switch s {
	case minTimeFormatted:
		return minTime, nil
	case maxTimeFormatted:
		return maxTime, nil
	}
	return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s)
}

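// parseDuration parses either a float number of seconds or a Prometheus
// duration string. Illustrative inputs (not taken from a real request):
//
//	parseDuration("30.5") // 30.5 seconds
//	parseDuration("5m")   // 5 minutes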
func parseDuration(s string) (time.Duration, error) {
	if d, err := strconv.ParseFloat(s, 64); err == nil {
		ts := d * float64(time.Second)
		if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
			return 0, errors.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
		}
		return time.Duration(ts), nil
	}
	if d, err := model.ParseDuration(s); err == nil {
		return time.Duration(d), nil
	}
	return 0, errors.Errorf("cannot parse %q to a valid duration", s)
}

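// marshalPointJSON writes a promql.Point in the form the HTTP API uses for
// samples: a two-element array of the timestamp in seconds (with millisecond
// precision) and the value rendered as a string.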
func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*promql.Point)(ptr))
	stream.WriteArrayStart()
	// Write out the millisecond timestamp as seconds with a fractional part,
	// formatting the integer parts directly; this is ~3x faster than
	// converting the timestamp to a float first.
	t := p.T
	if t < 0 {
		stream.WriteRaw(`-`)
		t = -t
	}
	stream.WriteInt64(t / 1000)
	fraction := t % 1000
	if fraction != 0 {
		stream.WriteRaw(`.`)
		if fraction < 100 {
			stream.WriteRaw(`0`)
		}
		if fraction < 10 {
			stream.WriteRaw(`0`)
		}
		stream.WriteInt64(fraction)
	}
	stream.WriteMore()
	stream.WriteRaw(`"`)

	// Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
	// for https://github.com/json-iterator/go/issues/365 (jsoniter, following the JSON standard, doesn't allow Inf/NaN).
	buf := stream.Buffer()
	abs := math.Abs(p.V)
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if abs < 1e-6 || abs >= 1e21 {
			fmt = 'e'
		}
	}
	buf = strconv.AppendFloat(buf, p.V, fmt, -1, 64)
	stream.SetBuffer(buf)

	stream.WriteRaw(`"`)
	stream.WriteArrayEnd()
}

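// marshalPointJSONIsEmpty always reports false so that points are never
// omitted when marshaled.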
func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}