Mirror of https://github.com/prometheus/prometheus.git

Commit e37ff8f6b6: Merge remote-tracking branch 'upstream/main' into sync
@@ -120,6 +120,7 @@ jobs:
     steps:
       - checkout
       - run: ./scripts/sync_repo_files.sh
+      - run: ./scripts/sync_codemirror.sh

 workflows:
   version: 2
.gitpod.Dockerfile (new file, vendored)
@@ -0,0 +1,7 @@
+FROM gitpod/workspace-full
+
+ENV CUSTOM_NODE_VERSION=16
+
+RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
+
+RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
@@ -1,4 +1,5 @@
----
+image:
+  file: .gitpod.Dockerfile
 tasks:
   - init:
       make build
@@ -6,7 +7,7 @@ tasks:
       gp sync-done build
       ./prometheus --config.file=documentation/examples/prometheus.yml
   - command: |
-      cd web/ui/react-app
+      cd web/ui/
       gp sync-await build
       unset BROWSER
       export DANGEROUSLY_DISABLE_HOST_CHECK=true
CHANGELOG.md
@@ -1,3 +1,20 @@
+## 2.30.3 / 2021-10-05
+
+* [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438
+* [BUGFIX] TSDB: Don't fail snapshot replay with exemplar storage disabled when the snapshot contains exemplars. #9438
+
+## 2.30.2 / 2021-10-01
+
+* [BUGFIX] TSDB: Don't error on overlapping m-mapped chunks during WAL replay. #9381
+
+## 2.30.1 / 2021-09-28
+
+* [ENHANCEMENT] Remote Write: Redact remote write URL when used for metric label. #9383
+* [ENHANCEMENT] UI: Redact remote write URL and proxy URL passwords in the `/config` page. #9408
+* [BUGFIX] promtool rules backfill: Prevent creation of data before the start time. #9339
+* [BUGFIX] promtool rules backfill: Do not query after the end time. #9340
+* [BUGFIX] Azure SD: Fix panic when no computername is set. #9387
+
 ## 2.30.0 / 2021-09-14

 * [FEATURE] **experimental** TSDB: Snapshot in-memory chunks on shutdown for faster restarts. Behind `--enable-feature=memory-snapshot-on-shutdown` flag. #7229
@@ -14,14 +14,13 @@ COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2

-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/
-RUN mkdir -p /prometheus && \
-    chown -R nobody:nobody etc/prometheus /prometheus
-WORKDIR /prometheus
+RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
+    chown -R nobody:nobody /etc/prometheus /prometheus

 USER       nobody
 EXPOSE     9090
 VOLUME     [ "/prometheus" ]
+WORKDIR    /prometheus
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD        [ "--config.file=/etc/prometheus/prometheus.yml", \
              "--storage.tsdb.path=/prometheus", \
@@ -464,7 +464,9 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
 		}
 	}

-	if vm.VirtualMachineProperties != nil && vm.VirtualMachineProperties.OsProfile != nil {
+	if vm.VirtualMachineProperties != nil &&
+		vm.VirtualMachineProperties.OsProfile != nil &&
+		vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
 		computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
 	}

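The guard added above is easiest to see in isolation. A minimal sketch with stand-in types (not the Azure SDK's own): dereferencing `ComputerName` without the extra nil check is what panicked when a VM had no computer name set (#9387).

```go
package main

import "fmt"

// Stand-in types mirroring the nested, all-optional Azure VM fields.
type osProfile struct{ ComputerName *string }
type vmProperties struct{ OsProfile *osProfile }
type virtualMachine struct{ VirtualMachineProperties *vmProperties }

func main() {
	// OsProfile present but ComputerName unset, as for the VMs that
	// triggered the panic before the extra nil check.
	vm := virtualMachine{VirtualMachineProperties: &vmProperties{OsProfile: &osProfile{}}}

	computerName := ""
	if vm.VirtualMachineProperties != nil &&
		vm.VirtualMachineProperties.OsProfile != nil &&
		vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
		computerName = *vm.VirtualMachineProperties.OsProfile.ComputerName
	}
	fmt.Printf("computer name: %q\n", computerName) // stays empty instead of panicking
}
```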
@@ -199,7 +199,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		logger = log.NewNopLogger()
 	}

-	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithHTTP2Disabled(), config.WithIdleConnTimeout(2*watchTimeout))
+	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
 	if err != nil {
 		return nil, err
 	}
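The service-discovery hunks that follow all make the same mechanical change: the `config.WithHTTP2Disabled()` option is dropped, because the prometheus/common v0.31.1 upgrade (see go.mod below) re-enables HTTP/2 and removes that option. A hedged sketch of the call shape that remains; the `"example_sd"` client name is illustrative, not one Prometheus uses:

```go
package main

import (
	"fmt"
	"time"

	config_util "github.com/prometheus/common/config"
)

func main() {
	// Options such as WithIdleConnTimeout still exist; only the
	// HTTP/2 toggle went away with the v0.31.1 upgrade.
	cfg := config_util.HTTPClientConfig{}
	client, err := config_util.NewClientFromConfig(
		cfg, "example_sd", config_util.WithIdleConnTimeout(2*time.Minute),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("client ready: %T\n", client) // *http.Client
}
```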
@@ -108,7 +108,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		port: conf.Port,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -118,7 +118,7 @@ type Discovery struct {

 // NewDiscovery creates a new Eureka discovery for the given role.
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -64,7 +64,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
 		port: conf.Port,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -59,7 +59,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
 		endpoint: conf.robotEndpoint,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -113,7 +113,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		logger = log.NewNopLogger()
 	}

-	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", config.WithHTTP2Disabled())
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
 	if err != nil {
 		return nil, err
 	}
@@ -283,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		}
 		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
 	} else {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 		if err != nil {
 			return nil, err
 		}
@@ -132,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		eventPollingEnabled: true,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -131,7 +131,7 @@ type Discovery struct {

 // NewDiscovery returns a new Marathon Discovery.
 func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -142,7 +142,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove
 	// unix, which are not supported by the HTTP client. Passing HTTP client
 	// options to the Docker client makes those non-HTTP requests fail.
 	if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd")
 		if err != nil {
 			return nil, err
 		}
@@ -146,7 +146,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err
 	// unix, which are not supported by the HTTP client. Passing HTTP client
 	// options to the Docker client makes those non-HTTP requests fail.
 	if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd")
 		if err != nil {
 			return nil, err
 		}
@@ -136,7 +136,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		logger = log.NewNopLogger()
 	}

-	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", config.WithHTTP2Disabled())
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
 	if err != nil {
 		return nil, err
 	}
@@ -70,7 +70,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) {
 		tagsFilter: conf.TagsFilter,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -81,7 +81,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
 		tagsFilter: conf.TagsFilter,
 	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +112,7 @@ func NewHTTPResourceClient(conf *HTTPResourceClientConfig, protocolVersion Proto
 		endpointURL.RawQuery = conf.ExtraQueryParams.Encode()
 	}

-	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithHTTP2Disabled(), config.WithIdleConnTimeout(conf.Timeout))
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithIdleConnTimeout(conf.Timeout))
 	if err != nil {
 		return nil, err
 	}
@@ -1546,6 +1546,29 @@ Available meta labels:
 * If the endpoints belong to a service, all labels of the `role: service` discovery are attached.
 * For all targets backed by a pod, all labels of the `role: pod` discovery are attached.

+#### `endpointslice`
+
+The `endpointslice` role discovers targets from existing endpointslices. For each endpoint
+address referenced in the endpointslice object, one target is discovered. If the endpoint is backed by a pod, all
+additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.
+
+Available meta labels:
+* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
+* `__meta_kubernetes_endpointslice_name`: The name of the endpointslice object.
+* For all targets discovered directly from the endpointslice list (those not additionally inferred
+  from underlying pods), the following labels are attached:
+  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
+  * `__meta_kubernetes_endpointslice_address_target_name`: Name of the referenced object.
+  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of the address of the target.
+  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
+  * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
+* If the endpoints belong to a service, all labels of the `role: service` discovery are attached.
+* For all targets backed by a pod, all labels of the `role: pod` discovery are attached.
+
 #### `ingress`

 The `ingress` role discovers a target for each path of each ingress.
@@ -1579,7 +1602,7 @@ See below for the configuration options for Kubernetes discovery:
 # One of endpoints, service, pod, node, or ingress.
 role: <string>

 # Optional path to a kubeconfig file.
+# Note that api_server and kube_config are mutually exclusive.
 [ kubeconfig_file: <filename> ]

@@ -1658,7 +1681,7 @@ inside a Prometheus-enabled mesh.

 The following meta labels are available for each target:

 * `__meta_kuma_mesh`: the name of the proxy's Mesh
 * `__meta_kuma_dataplane`: the name of the proxy
 * `__meta_kuma_service`: the name of the proxy's associated Service
 * `__meta_kuma_label_<tagname>`: each tag of the proxy
@@ -361,7 +361,7 @@ URL query parameters:
 - `end=<rfc3339 | unix_timestamp>`: End timestamp.

 ```json
-$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=020-09-14T15:23:25.479Z'
+$ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
 {
   "status": "success",
   "data": [
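The fix above corrects a truncated year (`020-…` → `2020-…`) in the documented example request. For completeness, a sketch of the same call made from Go rather than curl; it assumes a Prometheus server listening on localhost:9090:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("query", "test_exemplar_metric_total")
	q.Set("start", "2020-09-14T15:22:25.479Z")
	q.Set("end", "2020-09-14T15:23:25.479Z") // four-digit year, per the fix
	resp, err := http.Get("http://localhost:9090/api/v1/query_exemplars?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // {"status":"success","data":[...]}
}
```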
@@ -40,6 +40,16 @@ grouping labels becoming the output label set. The metric name is dropped. Entri
 for which no matching entry in the right-hand vector can be found are not part of
 the result.

+### Trigonometric binary operators
+
+The following trigonometric binary operators, which work in radians, exist in Prometheus:
+
+* `atan2` (based on https://pkg.go.dev/math#Atan2)
+
+Trigonometric operators allow trigonometric functions to be executed on two vectors using
+vector matching, which isn't available with normal functions. They act in the same manner
+as arithmetic operators.
+
 ### Comparison binary operators

 The following binary comparison operators exist in Prometheus:
@@ -264,7 +274,7 @@ The following list shows the precedence of binary operators in Prometheus, from
 highest to lowest.

 1. `^`
-2. `*`, `/`, `%`
+2. `*`, `/`, `%`, `atan2`
 3. `+`, `-`
 4. `==`, `!=`, `<=`, `<`, `>=`, `>`
 5. `and`, `unless`
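Since `atan2` is documented above as being based on Go's `math.Atan2`, the expected values in the new `operators.test` fixture further down can be reproduced directly. A small sanity check, not part of the change itself:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// PromQL `trigy atan2 trigx` applies math.Atan2(lhs, rhs) per matched
	// sample pair; with trigy=10 and trigx=20 this matches the fixture.
	fmt.Println(math.Atan2(10, 20))         // 0.4636476090008061
	fmt.Println(math.Atan2(10, math.NaN())) // NaN
}
```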
go.mod
@@ -46,7 +46,7 @@ require (
 	github.com/prometheus/alertmanager v0.23.0
 	github.com/prometheus/client_golang v1.11.0
 	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.30.0
+	github.com/prometheus/common v0.31.1
 	github.com/prometheus/common/sigv4 v0.1.0
 	github.com/prometheus/exporter-toolkit v0.6.1
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
go.sum
@@ -1140,8 +1140,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
 github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
+github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
@@ -634,7 +634,7 @@ type alertmanagerSet struct {
 }

 func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled())
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
 	if err != nil {
 		return nil, err
 	}
@@ -154,7 +154,7 @@ func TestHandlerSendAll(t *testing.T) {
 			Username: "prometheus",
 			Password: "testing_password",
 		},
-	}, "auth_alertmanager", config_util.WithHTTP2Disabled())
+	}, "auth_alertmanager")

 	h.alertmanagers = make(map[string]*alertmanagerSet)
@@ -2116,6 +2116,8 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) {
 		return lhs, lhs >= rhs
 	case parser.LTE:
 		return lhs, lhs <= rhs
+	case parser.ATAN2:
+		return math.Atan2(lhs, rhs), true
 	}
 	panic(errors.Errorf("operator %q not allowed for operations between Vectors", op))
 }
@@ -84,6 +84,7 @@ NEQ_REGEX
 POW
 SUB
 AT
+ATAN2
 %token	operatorsEnd

 // Aggregators.
@@ -156,7 +157,7 @@ START_METRIC_SELECTOR
 %left LAND LUNLESS
 %left EQLC GTE GTR LSS LTE NEQ
 %left ADD SUB
-%left MUL DIV MOD
+%left MUL DIV MOD ATAN2
 %right POW

 // Offset modifiers do not have associativity.
@@ -237,6 +238,7 @@ aggregate_modifier:

 // Operator precedence only works if each of those is listed separately.
 binary_expr	: expr ADD     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
+		| expr ATAN2   bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
 		| expr DIV     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
 		| expr EQLC    bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
 		| expr GTE     bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
@@ -674,7 +676,7 @@ series_value	: IDENTIFIER
 aggregate_op	: AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;

 // inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label	: AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END;
+maybe_label	: AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;

 unary_op	: ADD | SUB;
(File diff suppressed because it is too large.)
@@ -97,6 +97,7 @@ var key = map[string]ItemType{
 	"and":    LAND,
 	"or":     LOR,
 	"unless": LUNLESS,
+	"atan2":  ATAN2,

 	// Aggregators.
 	"sum": SUM,
@@ -340,6 +340,10 @@ var tests = []struct {
 		input:    "bool",
 		expected: []Item{{BOOL, 0, "bool"}},
 	},
+	{
+		input:    "atan2",
+		expected: []Item{{ATAN2, 0, "atan2"}},
+	},
 	{
promql/testdata/operators.test (vendored)
@@ -467,3 +467,17 @@ eval instant at 5m test_total < bool test_smaller
     {instance="localhost"} 0

 eval instant at 5m test_total < test_smaller
+
+clear
+
+# Testing atan2.
+load 5m
+	trigy{} 10
+	trigx{} 20
+	trigNaN{} NaN
+
+eval instant at 5m trigy atan2 trigx
+	trigy{} 0.4636476090008061
+
+eval instant at 5m trigy atan2 trigNaN
+	trigy{} NaN
@@ -269,7 +269,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 		logger = log.NewNopLogger()
 	}

-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, config_util.WithHTTP2Disabled())
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
 		targetScrapePoolsFailed.Inc()
 		return nil, errors.Wrap(err, "error creating HTTP client")
@@ -380,7 +380,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 	targetScrapePoolReloads.Inc()
 	start := time.Now()

-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, config_util.WithHTTP2Disabled())
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
 		targetScrapePoolReloadsFailed.Inc()
 		return errors.Wrap(err, "error creating HTTP client")
@@ -149,7 +149,7 @@ func TestNewHTTPBearerToken(t *testing.T) {
 	cfg := config_util.HTTPClientConfig{
 		BearerToken: "1234",
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -176,7 +176,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
 	cfg := config_util.HTTPClientConfig{
 		BearerTokenFile: "testdata/bearertoken.txt",
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -205,7 +205,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
 			Password: "password123",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -233,7 +233,7 @@ func TestNewHTTPCACert(t *testing.T) {
 			CAFile: caCertPath,
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -266,7 +266,7 @@ func TestNewHTTPClientCert(t *testing.T) {
 			KeyFile: "testdata/client.key",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -295,7 +295,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
 			ServerName: "prometheus.rocks",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -324,7 +324,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
 			ServerName: "badname",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -362,7 +362,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) {
 			KeyFile: "testdata/nonexistent_client.key",
 		},
 	}
-	_, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	_, err := config_util.NewClientFromConfig(cfg, "test")
 	if err == nil {
 		t.Fatalf("Expected error, got nil.")
 	}
scripts/sync_codemirror.sh (new executable file)
@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+# vim: ts=2 et
+# Setting -x is absolutely forbidden as it could leak the GitHub token.
+set -uo pipefail
+
+# GITHUB_TOKEN required scope: repo.repo_public
+
+git_mail="prometheus-team@googlegroups.com"
+git_user="prombot"
+branch="repo_sync_codemirror"
+commit_msg="Update codemirror"
+pr_title="Synchronize codemirror from prometheus/prometheus"
+pr_msg="Propagating changes from prometheus/prometheus default branch."
+target_repo="prometheus-community/codemirror-promql"
+source_path="web/ui/module/codemirror-promql"
+
+color_red='\e[31m'
+color_green='\e[32m'
+color_yellow='\e[33m'
+color_none='\e[0m'
+
+echo_red() {
+  echo -e "${color_red}$@${color_none}" 1>&2
+}
+
+echo_green() {
+  echo -e "${color_green}$@${color_none}" 1>&2
+}
+
+echo_yellow() {
+  echo -e "${color_yellow}$@${color_none}" 1>&2
+}
+
+GITHUB_TOKEN="${GITHUB_TOKEN:-}"
+if [ -z "${GITHUB_TOKEN}" ]; then
+  echo_red 'GitHub token (GITHUB_TOKEN) not set. Terminating.'
+  exit 1
+fi
+
+# List of files that should not be synced.
+excluded_files="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint MAINTAINERS.md"
+excluded_dirs=".github .circleci"
+
+# Go to the root of the repo
+cd "$(git rev-parse --show-cdup)" || exit 1
+
+source_dir="$(pwd)/${source_path}"
+
+tmp_dir="$(mktemp -d)"
+trap 'rm -rf "${tmp_dir}"' EXIT
+
+## Internal functions
+github_api() {
+  local url
+  url="https://api.github.com/${1}"
+  shift 1
+  curl --retry 5 --silent --fail -u "${git_user}:${GITHUB_TOKEN}" "${url}" "$@"
+}
+
+get_default_branch() {
+  github_api "repos/${1}" 2> /dev/null |
+    jq -r .default_branch
+}
+
+push_branch() {
+  local git_url
+  git_url="https://${git_user}:${GITHUB_TOKEN}@github.com/${1}"
+  # stdout and stderr are redirected to /dev/null otherwise git-push could leak
+  # the token in the logs.
+  # Delete the remote branch in case it was merged but not deleted.
+  git push --quiet "${git_url}" ":${branch}" 1>/dev/null 2>&1
+  git push --quiet "${git_url}" --set-upstream "${branch}" 1>/dev/null 2>&1
+}
+
+post_pull_request() {
+  local repo="$1"
+  local default_branch="$2"
+  local post_json
+  post_json="$(printf '{"title":"%s","base":"%s","head":"%s","body":"%s"}' "${pr_title}" "${default_branch}" "${branch}" "${pr_msg}")"
+  echo "Posting PR to ${default_branch} on ${repo}"
+  github_api "repos/${repo}/pulls" --data "${post_json}" --show-error |
    jq -r '"PR URL " + .html_url'
+}
+
+process_repo() {
+  local org_repo
+  local default_branch
+  org_repo="$1"
+  mkdir -p "${tmp_dir}/${org_repo}"
+  echo_green "Processing '${org_repo}'"
+
+  default_branch="$(get_default_branch "${org_repo}")"
+  if [[ -z "${default_branch}" ]]; then
+    echo "Can't get the default branch."
+    return
+  fi
+  echo "Default branch: ${default_branch}"
+
+  # Clone target repo to temporary directory and checkout to new branch
+  git clone --quiet "https://github.com/${org_repo}.git" "${tmp_dir}/${org_repo}"
+  cd "${tmp_dir}/${org_repo}" || return 1
+  git checkout -b "${branch}" || return 1
+
+  git rm -r .
+
+  cp -ra ${source_dir}/. .
+  git add .
+
+  for excluded_dir in ${excluded_dirs}; do
+    git reset -- "${excluded_dir}/*"
+    git checkout -- "${excluded_dir}/*"
+  done
+
+  for excluded_file in ${excluded_files}; do
+    git reset -- "${excluded_file}"
+    git checkout -- "${excluded_file}"
+  done
+
+  if [[ -n "$(git status --porcelain)" ]]; then
+    git config user.email "${git_mail}"
+    git config user.name "${git_user}"
+    git add .
+    git commit -s -m "${commit_msg}"
+    if push_branch "${org_repo}"; then
+      if ! post_pull_request "${org_repo}" "${default_branch}"; then
+        return 1
+      fi
+    else
+      echo "Pushing ${branch} to ${org_repo} failed"
+      return 1
+    fi
+  fi
+}
+
+process_repo ${target_repo}
@@ -18,7 +18,6 @@ import (
 	"container/heap"
 	"math"
 	"sort"
-	"strings"
 	"sync"

 	"github.com/pkg/errors"
@@ -197,15 +196,13 @@ func mergeStrings(a, b []string) []string {
 	res := make([]string, 0, maxl*10/9)

 	for len(a) > 0 && len(b) > 0 {
-		d := strings.Compare(a[0], b[0])
-
-		if d == 0 {
+		if a[0] == b[0] {
 			res = append(res, a[0])
 			a, b = a[1:], b[1:]
-		} else if d < 0 {
+		} else if a[0] < b[0] {
 			res = append(res, a[0])
 			a = a[1:]
-		} else if d > 0 {
+		} else {
 			res = append(res, b[0])
 			b = b[1:]
 		}
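The rewrite above leans on native string ordering agreeing with `strings.Compare`, dropping the extra call (and with it the `strings` import). A quick illustration of the equivalence it assumes:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	a, b := "alpha", "beta"
	// Go's built-in <, ==, > on strings agree with strings.Compare,
	// so the Compare-based branching can be replaced outright.
	fmt.Println(strings.Compare(a, b) < 0, a < b)   // true true
	fmt.Println(strings.Compare(a, a) == 0, a == a) // true true
}
```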
@@ -110,7 +110,7 @@ type ReadClient interface {

 // NewReadClient creates a new client for remote read.
 func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
-	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", config_util.WithHTTP2Disabled())
+	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client")
 	if err != nil {
 		return nil, err
 	}
@@ -136,7 +136,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {

 // NewWriteClient creates a new client for remote write.
 func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
-	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", config_util.WithHTTP2Disabled())
+	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client")
 	if err != nil {
 		return nil, err
 	}
@@ -26,6 +26,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/textparse"
 	"github.com/prometheus/prometheus/prompb"
@@ -450,6 +451,17 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
 	return result, nil
 }

+func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
+	timestamp := ep.Timestamp
+
+	return exemplar.Exemplar{
+		Labels: labelProtosToLabels(ep.Labels),
+		Value:  ep.Value,
+		Ts:     timestamp,
+		HasTs:  timestamp != 0,
+	}
+}
+
 // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
 func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	metric := make(model.Metric, len(labelPairs))
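The zero-timestamp convention in `exemplarProtoToExemplar` is worth spelling out: the protobuf field is a plain scalar, so a timestamp of exactly 0 is read as "no timestamp". A standalone sketch of the same rule (the helper itself is unexported, so this mirrors its logic rather than calling it):

```go
package main

import "fmt"

// Mirrors exemplarProtoToExemplar's HasTs rule without the prompb types.
func hasTimestamp(ts int64) bool { return ts != 0 }

func main() {
	fmt.Println(hasTimestamp(0))             // false: treated as unset
	fmt.Println(hasTimestamp(1633024800000)) // true
}
```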
@@ -36,7 +36,8 @@ var writeRequestFixture = &prompb.WriteRequest{
 			{Name: "d", Value: "e"},
 			{Name: "foo", Value: "bar"},
 		},
-		Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
+		Samples:   []prompb.Sample{{Value: 1, Timestamp: 0}},
+		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
 	},
 	{
 		Labels: []prompb.Label{
@@ -46,7 +47,8 @@ var writeRequestFixture = &prompb.WriteRequest{
 			{Name: "d", Value: "e"},
 			{Name: "foo", Value: "bar"},
 		},
-		Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
+		Samples:   []prompb.Sample{{Value: 2, Timestamp: 1}},
+		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
 	},
 	},
 }
@@ -158,7 +158,10 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 			continue
 		}

-		endpoint := rwConf.URL.String()
+		// Redacted to remove any passwords in the URL (that are
+		// technically accepted but not recommended) since this is
+		// only used for metric labels.
+		endpoint := rwConf.URL.Redacted()
 		newQueues[hash] = NewQueueManager(
 			newQueueManagerMetrics(rws.reg, name, endpoint),
 			rws.watcherMetrics,
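`(*url.URL).Redacted()` (standard library, Go 1.15+) is what keeps credentials out of the metric label, per the comment added above. The URL here is illustrative:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://user:secret@remote.example/api/v1/write")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())   // https://user:secret@remote.example/api/v1/write
	fmt.Println(u.Redacted()) // https://user:xxxxx@remote.example/api/v1/write
}
```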
@@ -15,10 +15,14 @@ package remote

 import (
 	"context"
+	"fmt"
 	"net/http"

 	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
 )
@@ -62,16 +66,35 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }

+// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
+func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
+	switch errors.Cause(err) {
+	case storage.ErrNotFound:
+		return storage.ErrNotFound
+	case storage.ErrOutOfOrderExemplar:
+		*outOfOrderErrs++
+		level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
+		return nil
+	default:
+		return err
+	}
+}
+
 func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
+	var (
+		outOfOrderExemplarErrs = 0
+	)
+
 	app := h.appendable.Appender(ctx)
 	defer func() {
 		if err != nil {
-			app.Rollback()
+			_ = app.Rollback()
 			return
 		}
 		err = app.Commit()
 	}()

+	var exemplarErr error
 	for _, ts := range req.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
 		for _, s := range ts.Samples {
@@ -79,7 +102,23 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 			if err != nil {
 				return err
 			}
+		}
+
+		for _, ep := range ts.Exemplars {
+			e := exemplarProtoToExemplar(ep)
+
+			_, exemplarErr = app.AppendExemplar(0, labels, e)
+			exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
+			if exemplarErr != nil {
+				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
+				level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
+			}
 		}
 	}

+	if outOfOrderExemplarErrs > 0 {
+		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+	}
+
 	return nil
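`checkAppendExemplarError` switches on `errors.Cause(err)` rather than on `err` itself. A short sketch of why, using the same `github.com/pkg/errors` package; the sentinel here is illustrative, standing in for `storage.ErrOutOfOrderExemplar`:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errOutOfOrder = errors.New("out of order exemplar") // stand-in sentinel

func main() {
	wrapped := errors.Wrap(errOutOfOrder, "append exemplar")
	fmt.Println(wrapped == errOutOfOrder)               // false: wrapping hides the sentinel
	fmt.Println(errors.Cause(wrapped) == errOutOfOrder) // true: Cause recovers it
}
```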
@@ -23,11 +23,12 @@ import (
 	"testing"

 	"github.com/go-kit/log"
+	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
-	"github.com/stretchr/testify/require"
 )
@@ -47,16 +48,23 @@ func TestRemoteWriteHandler(t *testing.T) {
 	require.Equal(t, http.StatusNoContent, resp.StatusCode)

 	i := 0
+	j := 0
 	for _, ts := range writeRequestFixture.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
 		for _, s := range ts.Samples {
 			require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
 			i++
 		}
+
+		for _, e := range ts.Exemplars {
+			exemplarLabels := labelProtosToLabels(e.Labels)
+			require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+			j++
+		}
 	}
 }

-func TestOutOfOrder(t *testing.T) {
+func TestOutOfOrderSample(t *testing.T) {
 	buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
 		Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
 		Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
@@ -67,7 +75,7 @@ func TestOutOfOrderSample(t *testing.T) {
 	require.NoError(t, err)

 	appendable := &mockAppendable{
-		latest: 100,
+		latestSample: 100,
 	}
 	handler := NewWriteHandler(log.NewNopLogger(), appendable)

@@ -78,6 +86,32 @@ func TestOutOfOrderSample(t *testing.T) {
 	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 }

+// This test case currently aims to verify that the WriteHandler endpoint
+// doesn't fail on ingestion errors since the exemplar storage is
+// still experimental.
+func TestOutOfOrderExemplar(t *testing.T) {
+	buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
+		Labels:    []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}},
+	}}, nil, nil)
+	require.NoError(t, err)
+
+	req, err := http.NewRequest("", "", bytes.NewReader(buf))
+	require.NoError(t, err)
+
+	appendable := &mockAppendable{
+		latestExemplar: 100,
+	}
+	handler := NewWriteHandler(log.NewNopLogger(), appendable)
+
+	recorder := httptest.NewRecorder()
+	handler.ServeHTTP(recorder, req)
+
+	resp := recorder.Result()
+	// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
+	require.Equal(t, http.StatusNoContent, resp.StatusCode)
+}
+
 func TestCommitErr(t *testing.T) {
 	buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil)
 	require.NoError(t, err)
@@ -101,9 +135,11 @@ func TestCommitErr(t *testing.T) {
 }

 type mockAppendable struct {
-	latest    int64
-	samples   []mockSample
-	commitErr error
+	latestSample   int64
+	samples        []mockSample
+	latestExemplar int64
+	exemplars      []mockExemplar
+	commitErr      error
 }

 type mockSample struct {
@@ -112,16 +148,23 @@ type mockSample struct {
 	v float64
 }

+type mockExemplar struct {
+	l  labels.Labels
+	el labels.Labels
+	t  int64
+	v  float64
+}
+
 func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
 	return m
 }

 func (m *mockAppendable) Append(_ uint64, l labels.Labels, t int64, v float64) (uint64, error) {
-	if t < m.latest {
+	if t < m.latestSample {
 		return 0, storage.ErrOutOfOrderSample
 	}

-	m.latest = t
+	m.latestSample = t
 	m.samples = append(m.samples, mockSample{l, t, v})
 	return 0, nil
 }
@@ -134,7 +177,12 @@ func (*mockAppendable) Rollback() error {
 	return fmt.Errorf("not implemented")
 }

-func (*mockAppendable) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
-	// noop until we implement exemplars over remote write
+func (m *mockAppendable) AppendExemplar(_ uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+	if e.Ts < m.latestExemplar {
+		return 0, storage.ErrOutOfOrderExemplar
+	}
+
+	m.latestExemplar = e.Ts
+	m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
 	return 0, nil
 }
@@ -17,4 +17,6 @@ A series of blog posts explaining different components of TSDB:
 * [WAL and Checkpoint](https://ganeshvernekar.com/blog/prometheus-tsdb-wal-and-checkpoint/)
 * [Memory Mapping of Head Chunks from Disk](https://ganeshvernekar.com/blog/prometheus-tsdb-mmapping-head-chunks-from-disk/)
 * [Persistent Block and its Index](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/)
-* [Queries](https://ganeshvernekar.com/blog/prometheus-tsdb-queries/)
+* [Queries](https://ganeshvernekar.com/blog/prometheus-tsdb-queries/)
+* [Compaction and Retention](https://ganeshvernekar.com/blog/prometheus-tsdb-compaction-and-retention/)
+* [Snapshot on Shutdown](https://ganeshvernekar.com/blog/prometheus-tsdb-snapshot-on-shutdown/)
tsdb/head.go
@@ -176,6 +176,10 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
 		stats = NewHeadStats()
 	}

+	if !opts.EnableExemplarStorage {
+		opts.MaxExemplars.Store(0)
+	}
+
 	h := &Head{
 		wal:    wal,
 		logger: l,
@@ -211,7 +215,16 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti

 func (h *Head) resetInMemoryState() error {
 	var err error
-	em := NewExemplarMetrics(h.reg)
+	var em *ExemplarMetrics
+	if h.exemplars != nil {
+		ce, ok := h.exemplars.(*CircularExemplarStorage)
+		if ok {
+			em = ce.metrics
+		}
+	}
+	if em == nil {
+		em = NewExemplarMetrics(h.reg)
+	}
 	es, err := NewCircularExemplarStorage(h.opts.MaxExemplars.Load(), em)
 	if err != nil {
 		return err
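The reason `resetInMemoryState` now reuses the existing `ExemplarMetrics` instead of calling `NewExemplarMetrics(h.reg)` again: registering equivalent collectors twice on one registry fails, which is what broke snapshot replay in #9437. A minimal reproduction of that failure mode (the `demo_total` counter is illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	c1 := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total"})
	fmt.Println(reg.Register(c1)) // <nil>

	// A second collector with the same descriptor is rejected — hence the
	// exemplar metrics must be created once and carried across Head resets.
	c2 := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total"})
	fmt.Println(reg.Register(c2)) // duplicate metrics collector registration attempted
}
```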
@@ -31,6 +31,7 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
@@ -2821,7 +2822,16 @@ func TestChunkSnapshot(t *testing.T) {

 		// Test the replay of snapshot.
 		head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.

+		// Disabling exemplars to check that it does not hard fail replay
+		// https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
+		head.opts.EnableExemplarStorage = false
+		head.opts.MaxExemplars.Store(0)
+		expExemplars = expExemplars[:0]
+
 		openHeadAndCheckReplay()

 		require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
 	}
 }
@@ -2873,7 +2883,8 @@ func TestSnapshotError(t *testing.T) {
 	// Create new Head which should replay this snapshot.
 	w, err := wal.NewSize(nil, nil, head.wal.Dir(), 32768, false)
 	require.NoError(t, err)
-	head, err = NewHead(nil, nil, w, head.opts, nil)
+	// Testing https://github.com/prometheus/prometheus/issues/9437 with the registry.
+	head, err = NewHead(prometheus.NewRegistry(), nil, w, head.opts, nil)
 	require.NoError(t, err)
 	require.NoError(t, head.Init(math.MinInt64))
@@ -47,6 +47,8 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
 	// for error reporting.
 	var unknownRefs atomic.Uint64
 	var unknownExemplarRefs atomic.Uint64
+	// Track number of series records that had overlapping m-map chunks.
+	var mmapOverlappingChunks uint64

 	// Start workers that each process samples for a partition of the series ID space.
 	// They are connected through a ring of channels which ensures that all sample batches
@@ -242,8 +244,6 @@ Outer:
 			}

 			// Checking if the new m-mapped chunks overlap with the already existing ones.
-			// This should never happen, but we have a check anyway to detect any
-			// edge cases that we might have missed.
 			if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 {
 				if overlapsClosedInterval(
 					mSeries.mmappedChunks[0].minTime,
@@ -251,9 +251,17 @@ Outer:
 					mmc[0].minTime,
 					mmc[len(mmc)-1].maxTime,
 				) {
-					// The m-map chunks for the new series ref overlaps with old m-map chunks.
-					seriesCreationErr = errors.Errorf("overlapping m-mapped chunks for series %s", mSeries.lset.String())
-					break Outer
+					mmapOverlappingChunks++
+					level.Debug(h.logger).Log(
+						"msg", "M-mapped chunks overlap on a duplicate series record",
+						"series", mSeries.lset.String(),
+						"oldref", mSeries.ref,
+						"oldmint", mSeries.mmappedChunks[0].minTime,
+						"oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
+						"newref", walSeries.Ref,
+						"newmint", mmc[0].minTime,
+						"newmaxt", mmc[len(mmc)-1].maxTime,
+					)
 				}
 			}

@@ -352,6 +360,9 @@ Outer:
 	if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 {
 		level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load())
 	}
+	if mmapOverlappingChunks > 0 {
+		level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", mmapOverlappingChunks)
+	}
 	return nil
 }

@@ -938,6 +949,11 @@ Outer:
 			}
 		}

+		if !h.opts.EnableExemplarStorage || h.opts.MaxExemplars.Load() <= 0 {
+			// Exemplar storage is disabled.
+			continue Outer
+		}
+
 		decbuf := encoding.Decbuf{B: rec[1:]}

 		exemplarBuf = exemplarBuf[:0]
@@ -959,7 +975,7 @@ Outer:
 					Value: e.V,
 					Ts:    e.T,
 				}); err != nil {
-					loopErr = errors.Wrap(err, "append exemplar")
+					loopErr = errors.Wrap(err, "add exemplar")
 					break Outer
 				}
 			}
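The overlap test the WAL replay relies on is a standard closed-interval check. A small sketch with the same semantics as the `overlapsClosedInterval` used above, reimplemented here for illustration:

```go
package main

import "fmt"

// Two closed intervals [mint1, maxt1] and [mint2, maxt2] overlap
// when each one starts no later than the other ends.
func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
	return mint1 <= maxt2 && mint2 <= maxt1
}

func main() {
	fmt.Println(overlapsClosedInterval(0, 10, 5, 15))  // true: shared range [5, 10]
	fmt.Println(overlapsClosedInterval(0, 10, 11, 15)) // false: disjoint
}
```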
@@ -18,7 +18,6 @@ import (
 	"encoding/binary"
 	"runtime"
 	"sort"
-	"strings"
 	"sync"

 	"github.com/prometheus/prometheus/pkg/labels"
@@ -94,8 +93,8 @@ func (p *MemPostings) SortedKeys() []labels.Label {
 	p.mtx.RUnlock()

 	sort.Slice(keys, func(i, j int) bool {
-		if d := strings.Compare(keys[i].Name, keys[j].Name); d != 0 {
-			return d < 0
+		if keys[i].Name != keys[j].Name {
+			return keys[i].Name < keys[j].Name
 		}
 		return keys[i].Value < keys[j].Value
 	})
@@ -1,6 +1,6 @@
 CodeMirror-promql
 =================
 [](https://circleci.com/gh/prometheus-community/codemirror-promql) [](./LICENSE)
 [](https://www.npmjs.org/package/codemirror-promql) [](https://codecov.io/gh/prometheus-community/codemirror-promql)

 ## Overview
@@ -8,6 +8,14 @@ CodeMirror-promql
 This project provides a mode for [CodeMirror Next](https://codemirror.net/6) that handles syntax highlighting, linting
 and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io/docs/introduction/overview/)).

 [screenshot omitted]

+## Where does it come from?
+
+The authoritative copy of this code lives in `prometheus/prometheus` and is synced to
+`prometheus-community/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code
+in https://github.com/prometheus/prometheus/tree/main/web/ui/module/codemirror-promql.
+
 ### Installation
@@ -38,15 +46,6 @@ npm install --save @codemirror/autocomplete @codemirror/highlight @codemirror/la
 npm install --save @codemirror/basic-setup
 ```

-### Playground
-
-[Here](https://codemirror-promql.netlify.app/) you have a playground available that is deployed from the latest commit
-available on the `master` branch.
-
-Here is a short preview of it looks like currently:
-
-[screenshot omitted]
-
 ## Usage

 As the setup of the PromQL language can be a bit tricky in CMN, this lib provides a class `PromQLExtension`
@@ -172,6 +171,16 @@ You can change it to use the HTTP method `GET` if you prefer.
 const promQL = new PromQLExtension().setComplete({ remote: { httpMethod: 'GET' } })
 ```

+###### Override the API Prefix
+
+The default Prometheus client, when building the query to get data from Prometheus, uses an API prefix that defaults to `/api/v1`.
+
+You can override this value like this:
+
+```typescript
+const promql = new PromQLExtension().setComplete({ remote: { apiPrefix: '/my/api/prefix' } })
+```
+
 ###### Cache

 The default client has an embedded cache that is used to store the different metrics and labels retrieved from a remote
@@ -232,34 +241,9 @@ Note: In case this parameter is provided, then the rest of the configuration is

 ### Example

 * The development [app](./src/app) can give you an example of how to use it with no TS Framework.
 * [ReactJS example](https://github.com/prometheus/prometheus/blob/431ea75a11ca165dad9dd5d629b3cf975f4c186b/web/ui/react-app/src/pages/graph/CMExpressionInput.tsx)
 * [Angular example](https://github.com/perses/perses/blob/28b3bdac88b0ed7a4602f9c91106442eafcb6c34/internal/api/front/perses/src/app/project/prometheusrule/promql-editor/promql-editor.component.ts)

-## Contributions
-
-Any contribution or suggestion would be really appreciated. Feel free
-to [file an issue](https://github.com/prometheus-community/codemirror-promql/issues)
-or [send a pull request](https://github.com/prometheus-community/codemirror-promql/pulls).
-
-## Development
-
-In case you want to contribute and change the code by yourself, run the following commands:
-
-To install all dependencies:
-
-```
-npm install
-```
-
-To start the web server:
-
-```
-npm start
-```
-
-This should create a tab in your browser with the development app that contains CodeMirror Next with the PromQL plugin.
-
 ## License

-[MIT](./LICENSE)
+Apache License 2.0, see [LICENSE](https://github.com/prometheus-community/codemirror-promql/blob/master/LICENSE).
@@ -25,7 +25,7 @@
     "prometheus"
   ],
   "author": "Prometheus Authors <prometheus-developers@googlegroups.com>",
-  "license": "MIT",
+  "license": "Apache-2.0",
   "bugs": {
     "url": "https://github.com/prometheus-community/codemirror-promql/issues"
   },
@@ -34,13 +34,15 @@
     "lru-cache": "^6.0.0"
   },
   "devDependencies": {
-    "@codemirror/autocomplete": "^0.18.3",
-    "@codemirror/basic-setup": "^0.18.0",
-    "@codemirror/highlight": "^0.18.3",
-    "@codemirror/language": "^0.18.0",
-    "@codemirror/lint": "^0.18.1",
-    "@codemirror/state": "^0.18.2",
-    "@codemirror/view": "^0.18.1",
+    "@codemirror/autocomplete": "^0.19.3",
+    "@codemirror/basic-setup": "^0.19.0",
+    "@codemirror/highlight": "^0.19.5",
+    "@codemirror/language": "^0.19.3",
+    "@codemirror/lint": "^0.19.1",
+    "@codemirror/state": "^0.19.2",
+    "@codemirror/view": "^0.19.7",
+    "@lezer/common": "^0.15.5",
+    "@lezer/generator": "^0.15.1",
     "@types/chai": "^4.2.12",
     "@types/lru-cache": "^5.1.0",
     "@types/mocha": "^8.0.3",
@@ -55,8 +57,6 @@
     "eslint-plugin-import": "^2.24.2",
     "eslint-plugin-prettier": "^4.0.0",
     "isomorphic-fetch": "^3.0.0",
-    "lezer": "^0.13.1",
-    "lezer-generator": "^0.13.1",
     "mocha": "^8.1.2",
     "nock": "^13.0.11",
     "nyc": "^15.1.0",
@@ -67,13 +67,13 @@
     "typescript": "^4.2.3"
   },
   "peerDependencies": {
-    "@codemirror/autocomplete": "^0.18.3",
-    "@codemirror/highlight": "^0.18.3",
-    "@codemirror/language": "^0.18.0",
-    "@codemirror/lint": "^0.18.1",
-    "@codemirror/state": "^0.18.2",
-    "@codemirror/view": "^0.18.1",
-    "lezer": "^0.13.0"
+    "@codemirror/autocomplete": "^0.19.3",
+    "@codemirror/highlight": "^0.19.5",
+    "@codemirror/language": "^0.19.3",
+    "@codemirror/lint": "^0.19.1",
+    "@codemirror/state": "^0.19.2",
+    "@codemirror/view": "^0.19.7",
+    "@lezer/common": "^0.15.5"
   },
   "prettier": {
     "singleQuote": true,
@ -16,12 +16,6 @@ import { Matcher } from '../types';
|
|||
import { labelMatchersToString } from '../parser';
|
||||
import LRUCache from 'lru-cache';
|
||||
|
||||
const apiPrefix = '/api/v1';
|
||||
const labelsEndpoint = apiPrefix + '/labels';
|
||||
const labelValuesEndpoint = apiPrefix + '/label/:name/values';
|
||||
const seriesEndpoint = apiPrefix + '/series';
|
||||
const metricMetadataEndpoint = apiPrefix + '/metadata';
|
||||
|
||||
export interface MetricMetadata {
|
||||
type: string;
|
||||
help: string;
|
||||
|
@ -61,6 +55,7 @@ export interface PrometheusConfig {
|
|||
// cache will allow user to change the configuration of the cached Prometheus client (which is used by default)
|
||||
cache?: CacheConfig;
|
||||
httpMethod?: 'POST' | 'GET';
|
||||
apiPrefix?: string;
|
||||
}
|
||||
|
||||
interface APIResponse<T> {
|
||||
|
@ -83,6 +78,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
|
|||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
private readonly errorHandler?: (error: any) => void;
|
||||
private readonly httpMethod: 'POST' | 'GET' = 'POST';
|
||||
private readonly apiPrefix: string = '/api/v1';
|
||||
// For some reason, just assigning via "= fetch" here does not end up executing fetch correctly
|
||||
// when calling it, thus the indirection via another function wrapper.
|
||||
private readonly fetchFn: FetchFn = (input: RequestInfo, init?: RequestInit): Promise<Response> => fetch(input, init);
|
||||
|
@ -99,6 +95,9 @@ export class HTTPPrometheusClient implements PrometheusClient {
|
|||
if (config.httpMethod) {
|
||||
this.httpMethod = config.httpMethod;
|
||||
}
|
||||
if (config.apiPrefix) {
|
||||
this.apiPrefix = config.apiPrefix;
|
||||
}
|
||||
}
|
||||
|
||||
labelNames(metricName?: string): Promise<string[]> {
|
||||
|
@ -106,7 +105,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
|
|||
const start = new Date(end.getTime() - this.lookbackInterval);
|
||||
if (metricName === undefined || metricName === '') {
|
||||
const request = this.buildRequest(
|
||||
labelsEndpoint,
|
||||
this.labelsEndpoint(),
|
||||
new URLSearchParams({
|
||||
start: start.toISOString(),
|
||||
end: end.toISOString(),
|
||||
|
@ -150,7 +149,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
|
|||
end: end.toISOString(),
|
||||
});
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
return this.fetchAPI<string[]>(`${labelValuesEndpoint.replace(/:name/gi, labelName)}?${params}`).catch((error) => {
|
||||
return this.fetchAPI<string[]>(`${this.labelValuesEndpoint().replace(/:name/gi, labelName)}?${params}`).catch((error) => {
|
||||
if (this.errorHandler) {
|
||||
this.errorHandler(error);
|
||||
}
|
||||
|
@ -175,7 +174,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
|
|||
}
|
||||
|
||||
metricMetadata(): Promise<Record<string, MetricMetadata[]>> {
|
||||
return this.fetchAPI<Record<string, MetricMetadata[]>>(metricMetadataEndpoint).catch((error) => {
|
||||
return this.fetchAPI<Record<string, MetricMetadata[]>>(this.metricMetadataEndpoint()).catch((error) => {
|
||||
if (this.errorHandler) {
|
||||
this.errorHandler(error);
|
||||
}
|
||||
|
@ -187,7 +186,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
    const end = new Date();
    const start = new Date(end.getTime() - this.lookbackInterval);
    const request = this.buildRequest(
      seriesEndpoint,
      this.seriesEndpoint(),
      new URLSearchParams({
        start: start.toISOString(),
        end: end.toISOString(),

@ -239,6 +238,19 @@ export class HTTPPrometheusClient implements PrometheusClient {
    }
    return { uri, body };
  }

  private labelsEndpoint(): string {
    return `${this.apiPrefix}/labels`;
  }
  private labelValuesEndpoint(): string {
    return `${this.apiPrefix}/label/:name/values`;
  }
  private seriesEndpoint(): string {
    return `${this.apiPrefix}/series`;
  }
  private metricMetadataEndpoint(): string {
    return `${this.apiPrefix}/metadata`;
  }
}

class Cache {

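Taken together, these hunks replace the module-level endpoint constants with private methods so that every path is derived from the configurable apiPrefix. A minimal standalone sketch of the pattern (the EndpointBuilder name is hypothetical):

class EndpointBuilder {
  // Same default prefix the client falls back to when none is configured.
  constructor(private readonly apiPrefix: string = '/api/v1') {}

  labels(): string {
    return `${this.apiPrefix}/labels`;
  }

  labelValues(name: string): string {
    // Mirrors the ':name' placeholder substitution done by the client.
    return `${this.apiPrefix}/label/:name/values`.replace(/:name/gi, name);
  }
}

const endpoints = new EndpointBuilder('/prom/api/v1');
console.log(endpoints.labelValues('job')); // "/prom/api/v1/label/job/values"
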
@ -452,6 +452,12 @@ describe('analyzeCompletion test', () => {
      pos: 16,
      expectedContext: [{ kind: ContextKind.BinOp }, { kind: ContextKind.Offset }],
    },
    {
      title: 'autocomplete offset or binop 5',
      expr: 'sum(http_requests_total{method="GET"} off)',
      pos: 41,
      expectedContext: [{ kind: ContextKind.BinOp }, { kind: ContextKind.Offset }],
    },
    {
      title: 'not autocompleting duration for a matrixSelector',
      expr: 'go[]',

@ -1051,6 +1057,17 @@ describe('autocomplete promQL test', () => {
        span: /^[a-zA-Z0-9_:]+$/,
      },
    },
    {
      title: 'autocomplete offset or binop 5',
      expr: 'sum(http_requests_total{method="GET"} off)',
      pos: 41,
      expectedResult: {
        options: ([] as Completion[]).concat(binOpTerms, [{ label: 'offset' }]),
        from: 38,
        to: 41,
        span: /^[a-zA-Z0-9_:]+$/,
      },
    },
    {
      title: 'offline not autocompleting duration for a matrixSelector',
      expr: 'go[]',

@ -12,7 +12,7 @@
// limitations under the License.

import { CompleteStrategy } from './index';
import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';
import { PrometheusClient } from '../client';
import {
  Add,

@ -230,22 +230,22 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
    case Identifier:
      // sometimes an Identifier has an error as parent. This should be treated in priority.
      if (node.parent?.type.id === 0) {
        const parent = node.parent;
        if (parent.parent?.type.id === StepInvariantExpr) {
        const errorNodeParent = node.parent.parent;
        if (errorNodeParent?.type.id === StepInvariantExpr) {
          // we are likely in the given situation:
          // `expr @ s`
          // we can autocomplete start / end
          result.push({ kind: ContextKind.AtModifiers });
          break;
        }
        if (parent.parent?.type.id === AggregateExpr) {
        if (errorNodeParent?.type.id === AggregateExpr) {
          // it matches 'sum() b'. So here we can autocomplete:
          // - the aggregate operation modifier
          // - the binary operation (since it's not mandatory to have an aggregate operation modifier)
          result.push({ kind: ContextKind.AggregateOpModifier }, { kind: ContextKind.BinOp });
          break;
        }
        if (parent.parent?.type.id === VectorSelector) {
        if (errorNodeParent?.type.id === VectorSelector) {
          // it matches 'sum b'. So here we also have to autocomplete the aggregate operation modifier only
          // if the associated metricIdentifier is matching an aggregation operation.
          // Note: here is the corresponding tree in order to understand the situation:

@ -267,16 +267,29 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
          result.push({ kind: ContextKind.BinOp }, { kind: ContextKind.Offset });
          break;
        }

        if (errorNodeParent && containsChild(errorNodeParent, Expr)) {
          // this last case can appear with the following expressions:
          // 1. http_requests_total{method="GET"} off
          // 2. rate(foo[5m]) un
          // 3. sum(http_requests_total{method="GET"} off)
          // For these different cases we have this kind of tree:
          // Parent (
          //    Expr(),
          //    ⚠(Identifier)
          // )
          // We don't really care about the parent here; we are more interested in whether the siblings of the error node include the node 'Expr'.
          // If that is the case, then we should likely autocomplete the BinOp or the offset.
          result.push({ kind: ContextKind.BinOp }, { kind: ContextKind.Offset });
          break;
        }
      }
      // As the leaf Identifier is coming for a lot of different case, we have to take a bit time to analyze the tree
      // As the leaf Identifier can come from different cases, we have to take a bit of time to analyze the tree
      // in order to know what we have to autocomplete exactly.
      // Here are some cases:
      // 1. metric_name / ignor --> we should autocomplete the BinOpModifier + metric/function/aggregation
      // 2. http_requests_total{method="GET"} off --> offset or binOp should be autocompleted here
      // 3. rate(foo[5m]) un --> offset or binOp should be autocompleted
      // 4. sum(http_requests_total{method="GET"} off) --> offset or binOp should be autocompleted
      // 5. sum(http_requests_total{method="GET"} / o) --> BinOpModifier + metric/function/aggregation
      // All examples above give a different tree each time but ends up to be treated in this case.
      // 2. sum(http_requests_total{method="GET"} / o) --> BinOpModifier + metric/function/aggregation
      // The examples above each give a different tree but end up being treated in this case.
      // But they all have the following common tree pattern:
      // Parent( Expr(...),
      //         ... ,

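For reference, the sibling walk that containsChild performs on the error node's parent can be sketched directly against the @lezer/common API (the helper name hasExprSibling is hypothetical; exprTypeId would be the id of 'Expr' from the generated parser terms):

import { SyntaxNode } from '@lezer/common';

// True when any direct child of the given node has the requested type id.
function hasExprSibling(parent: SyntaxNode, exprTypeId: number): boolean {
  for (let child = parent.firstChild; child !== null; child = child.nextSibling) {
    if (child.type.id === exprTypeId) {
      return true;
    }
  }
  return false;
}
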
@ -314,8 +327,6 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
        if (containsAtLeastOneChild(parent, Eql, Gte, Gtr, Lte, Lss, Neq) && !walkThrough(parent, BinModifiers, Bool)) {
          result.push({ kind: ContextKind.Bool });
        }
      } else if (parent.type.id !== BinaryExpr || (parent.type.id === BinaryExpr && containsAtLeastOneChild(parent, 0))) {
        result.push({ kind: ContextKind.BinOp }, { kind: ContextKind.Offset });
      }
    } else {
      result.push(

@ -28,6 +28,7 @@ export const binOpTerms = [
  { label: '<' },
  { label: '<=' },
  { label: '!=' },
  { label: 'atan2' },
  { label: 'and' },
  { label: 'or' },
  { label: 'unless' },

@ -16,7 +16,7 @@

@precedence {
  pow @right,
  mul @left,
  mul @left
  add @left,
  eql @left,
  and @left,

@ -69,6 +69,7 @@ BinaryExpr {
  Expr !mul Mul BinModifiers Expr |
  Expr !mul Div BinModifiers Expr |
  Expr !mul Mod BinModifiers Expr |
  Expr !mul Atan2 BinModifiers Expr |
  Expr !add Add BinModifiers Expr |
  Expr !add Sub BinModifiers Expr |
  Expr !eql Eql BinModifiers Expr |

@ -333,6 +334,7 @@ NumberLiteral {
// Contextual keywords

@external extend {Identifier} extendIdentifier from "./tokens" {
  Atan2,
  Avg,
  Bottomk,
  Count,

@ -840,3 +840,10 @@ sum:my_metric_name:rate5m

==>
MetricName(MetricIdentifier(Identifier))

# Testing Atan2 inherited precedence level

1 + foo atan2 bar

==>
PromQL(Expr(BinaryExpr(Expr(NumberLiteral),Add,BinModifiers,Expr(BinaryExpr(Expr(VectorSelector(MetricIdentifier(Identifier))),Atan2,BinModifiers,Expr(VectorSelector(MetricIdentifier(Identifier))))))))

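The expected tree above can be reproduced with a few lines against the generated parser — a hedged sketch, using the same relative import as the test files in this diff:

import { parser } from '../grammar/parser';

// Atan2 inherits multiplicative precedence, so the expression groups as
// 1 + (foo atan2 bar): the Atan2 node nests under the Add node.
const tree = parser.parse('1 + foo atan2 bar');
console.log(tree.toString());
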
@ -1,5 +1,5 @@
import { parser } from '../parser';
import { fileTests } from 'lezer-generator/dist/test';
import { fileTests } from '@lezer/generator/dist/test';

import * as fs from 'fs';
import * as path from 'path';

@ -14,6 +14,7 @@
import {
  And,
  Avg,
  Atan2,
  Bool,
  Bottomk,
  By,

@ -58,6 +59,7 @@ export const specializeIdentifier = (value, stack) => {

const contextualKeywordTokens = {
  avg: Avg,
  atan2: Atan2,
  bottomk: Bottomk,
  count: Count,
  count_values: CountValues,

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';
import { EqlRegex, EqlSingle, LabelName, MatchOp, Neq, NeqRegex, StringLiteral } from '../grammar/parser.terms';
import { EditorState } from '@codemirror/state';
import { Matcher } from '../types';

@ -12,7 +12,7 @@
// limitations under the License.

import { Diagnostic } from '@codemirror/lint';
import { SyntaxNode, Tree } from 'lezer-tree';
import { SyntaxNode, Tree } from '@lezer/common';
import {
  AggregateExpr,
  And,

@ -35,7 +35,7 @@ import {
} from '../grammar/parser.terms';
import { createEditorState } from '../test/utils.test';
import { containsAtLeastOneChild, containsChild, retrieveAllRecursiveNodes, walkBackward, walkThrough } from './path-finder';
import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';
import { syntaxTree } from '@codemirror/language';

describe('walkThrough test', () => {

@ -147,14 +147,6 @@ describe('containsChild test', () => {
      walkThrough: [Expr, BinaryExpr],
      child: [Expr, Expr],
    },
    {
      title: 'Should find all expr in a subtree 2',
      expr: 'http_requests_total{method="GET"} off',
      pos: 0,
      expectedResult: true,
      walkThrough: [Expr, BinaryExpr],
      child: [Expr, Expr],
    },
    {
      title: 'Should not find all child required',
      expr: 'sum(ra)',

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';

// walkBackward will iterate over the tree from the leaf to the root until it finds the given `exit` node.
// It returns null if the exit is not found.

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';
import {
  AggregateExpr,
  BinaryExpr,

@ -12,7 +12,7 @@
// limitations under the License.

import { EditorState } from '@codemirror/state';
import { SyntaxNode } from 'lezer-tree';
import { SyntaxNode } from '@lezer/common';
import {
  And,
  BinaryExpr,

@ -17,15 +17,15 @@ import { Extension } from '@codemirror/state';
import { CompleteConfiguration, CompleteStrategy, newCompleteStrategy } from './complete';
import { LintStrategy, newLintStrategy, promQLLinter } from './lint';
import { CompletionContext } from '@codemirror/autocomplete';
import { LezerLanguage } from '@codemirror/language';
import { LRLanguage } from '@codemirror/language';

export enum LanguageType {
  PromQL = 'PromQL',
  MetricName = 'MetricName',
}

export function promQLLanguage(top: LanguageType): LezerLanguage {
  return LezerLanguage.define({
export function promQLLanguage(top: LanguageType): LRLanguage {
  return LRLanguage.define({
    parser: parser.configure({
      top: top,
      props: [

@ -40,7 +40,7 @@ export function promQLLanguage(top: LanguageType): LezerLanguage {
      'Avg Bottomk Count Count_values Group Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword,
      'By Without Bool On Ignoring GroupLeft GroupRight Offset Start End': tags.modifier,
      'And Unless Or': tags.logicOperator,
      'Sub Add Mul Mod Div Eql Neq Lte Lss Gte Gtr EqlRegex EqlSingle NeqRegex Pow At': tags.operator,
      'Sub Add Mul Mod Div Atan2 Eql Neq Lte Lss Gte Gtr EqlRegex EqlSingle NeqRegex Pow At': tags.operator,
      UnaryOp: tags.arithmeticOperator,
      '( )': tags.paren,
      '[ ]': tags.squareBracket,

@ -13,13 +13,13 @@

import { parser } from '../grammar/parser';
import { EditorState } from '@codemirror/state';
import { LezerLanguage } from '@codemirror/language';
import { LRLanguage } from '@codemirror/language';
import nock from 'nock';

// used to inject an implementation of fetch in NodeJS
require('isomorphic-fetch');

const lightPromQLSyntax = LezerLanguage.define({ parser: parser });
const lightPromQLSyntax = LRLanguage.define({ parser: parser });

export function createEditorState(expr: string): EditorState {
  return EditorState.create({

759
web/ui/package-lock.json
generated
File diff suppressed because it is too large

@ -3,18 +3,18 @@
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@codemirror/autocomplete": "^0.18.3",
    "@codemirror/closebrackets": "^0.18.0",
    "@codemirror/commands": "^0.18.0",
    "@codemirror/comment": "^0.18.0",
    "@codemirror/highlight": "^0.18.3",
    "@codemirror/history": "^0.18.0",
    "@codemirror/language": "^0.18.0",
    "@codemirror/lint": "^0.18.1",
    "@codemirror/matchbrackets": "^0.18.0",
    "@codemirror/search": "^0.18.2",
    "@codemirror/state": "^0.18.2",
    "@codemirror/view": "^0.18.3",
    "@codemirror/autocomplete": "^0.19.3",
    "@codemirror/closebrackets": "^0.19.0",
    "@codemirror/commands": "^0.19.4",
    "@codemirror/comment": "^0.19.0",
    "@codemirror/highlight": "^0.19.5",
    "@codemirror/history": "^0.19.0",
    "@codemirror/language": "^0.19.3",
    "@codemirror/lint": "^0.19.1",
    "@codemirror/matchbrackets": "^0.19.1",
    "@codemirror/search": "^0.19.2",
    "@codemirror/state": "^0.19.2",
    "@codemirror/view": "^0.19.7",
    "@forevolve/bootstrap-dark": "^1.0.0",
    "@fortawesome/fontawesome-svg-core": "^1.2.14",
    "@fortawesome/free-solid-svg-icons": "^5.7.1",

@ -151,9 +151,10 @@ describe('Panel', () => {
      //change query without executing
      panel.setProps({ options: { ...defaultProps.options, expr: newExpr } });
      expect(executeQuerySpy).toHaveBeenCalledTimes(0);
      const debounceExecuteQuerySpy = jest.spyOn(instance, 'debounceExecuteQuery');
      //execute query implicitly with time change
      panel.setProps({ options: { ...defaultProps.options, expr: newExpr, endTime: 1575744840 } });
      expect(executeQuerySpy).toHaveBeenCalledTimes(1);
      expect(debounceExecuteQuerySpy).toHaveBeenCalledTimes(1);
    });
  });
});

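The spy now targets the debounced wrapper because executeQuery itself runs only after the debounce timer elapses, so a synchronous assertion on it would race the timer. A hedged standalone illustration with Jest fake timers (the scaffolding is invented for the example; debounce is the helper added to utils further down):

import { debounce } from '../../utils';

jest.useFakeTimers();

const calls: string[] = [];
const debouncedRun = debounce(() => calls.push('query'), 250);

debouncedRun();
expect(calls).toHaveLength(0); // deferred: nothing has run yet

jest.advanceTimersByTime(250);
expect(calls).toHaveLength(1); // exactly one execution after the quiet period
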
@ -13,6 +13,7 @@ import TimeInput from './TimeInput';
import QueryStatsView, { QueryStats } from './QueryStatsView';
import { QueryParams, ExemplarData } from '../../types/types';
import { API_PATH } from '../../constants/constants';
import { debounce } from '../../utils';

interface PanelProps {
  options: PanelOptions;

@ -69,6 +70,7 @@ export const PanelDefaultOptions: PanelOptions = {

class Panel extends Component<PanelProps, PanelState> {
  private abortInFlightFetch: (() => void) | null = null;
  private debounceExecuteQuery: () => void;

  constructor(props: PanelProps) {
    super(props);

@ -83,17 +85,19 @@ class Panel extends Component<PanelProps, PanelState> {
      stats: null,
      exprInputValue: props.options.expr,
    };

    this.debounceExecuteQuery = debounce(this.executeQuery.bind(this), 250);
  }

  componentDidUpdate({ options: prevOpts }: PanelProps): void {
    const { endTime, range, resolution, showExemplars, type } = this.props.options;
    if (
      prevOpts.endTime !== endTime ||
      prevOpts.range !== range ||
      prevOpts.resolution !== resolution ||
      prevOpts.type !== type ||
      showExemplars !== prevOpts.showExemplars
    ) {
    if (prevOpts.endTime !== endTime || prevOpts.range !== range) {
      this.debounceExecuteQuery();
      return;
    }

    if (prevOpts.resolution !== resolution || prevOpts.type !== type || showExemplars !== prevOpts.showExemplars) {
      this.executeQuery();
    }
  }

@ -269,3 +269,16 @@ export const parsePrometheusFloat = (value: string): string | number => {
    return Number(value);
  }
};

export function debounce<Params extends unknown[]>(
  func: (...args: Params) => unknown,
  timeout: number
): (...args: Params) => void {
  let timer: NodeJS.Timeout;
  return (...args: Params) => {
    clearTimeout(timer);
    timer = setTimeout(() => {
      func(...args);
    }, timeout);
  };
}

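A short usage sketch of the helper above (handler and values invented for illustration): calls arriving inside the timeout window collapse into a single trailing execution.

const onType = debounce((text: string) => console.log('search:', text), 250);

onType('p');
onType('pr');
onType('prom'); // only this call survives; 'search: prom' logs ~250ms later
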