Mirror of https://github.com/prometheus/prometheus.git, synced 2025-01-30 07:03:06 -08:00
Merge pull request #15480 from tjhop/merge-release-3.0-to-main
Some checks are pending
buf.build / lint and publish (push) Waiting to run
CI / Go tests (push) Waiting to run
CI / More Go tests (push) Waiting to run
CI / Go tests with previous Go version (push) Waiting to run
CI / UI tests (push) Waiting to run
CI / Go tests on Windows (push) Waiting to run
CI / Mixins tests (push) Waiting to run
CI / Build Prometheus for common architectures (0) (push) Waiting to run
CI / Build Prometheus for common architectures (1) (push) Waiting to run
CI / Build Prometheus for common architectures (2) (push) Waiting to run
CI / Build Prometheus for all architectures (0) (push) Waiting to run
CI / Build Prometheus for all architectures (1) (push) Waiting to run
CI / Build Prometheus for all architectures (10) (push) Waiting to run
CI / Build Prometheus for all architectures (11) (push) Waiting to run
CI / Build Prometheus for all architectures (2) (push) Waiting to run
CI / Build Prometheus for all architectures (3) (push) Waiting to run
CI / Build Prometheus for all architectures (4) (push) Waiting to run
CI / Build Prometheus for all architectures (5) (push) Waiting to run
CI / Build Prometheus for all architectures (6) (push) Waiting to run
CI / Build Prometheus for all architectures (7) (push) Waiting to run
CI / Build Prometheus for all architectures (8) (push) Waiting to run
CI / Build Prometheus for all architectures (9) (push) Waiting to run
CI / Report status of build Prometheus for all architectures (push) Blocked by required conditions
CI / Check generated parser (push) Waiting to run
CI / golangci-lint (push) Waiting to run
CI / fuzzing (push) Waiting to run
CI / codeql (push) Waiting to run
CI / Publish main branch artifacts (push) Blocked by required conditions
CI / Publish release artefacts (push) Blocked by required conditions
CI / Publish UI on npm Registry (push) Blocked by required conditions
Scorecards supply-chain security / Scorecards analysis (push) Waiting to run
Merge release 3.0 to main
commit c73ca9cb34
@@ -433,18 +433,40 @@ URL query parameters:
  series from which to read the label values. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label values.

This example queries for all label values for the `job` label:
This example queries for all label values for the `http_status_code` label:

```json
$ curl http://localhost:9090/api/v1/label/job/values
$ curl http://localhost:9090/api/v1/label/http_status_code/values
{
   "status" : "success",
   "data" : [
      "node",
      "prometheus"
      "200",
      "504"
   ]
}
```

Label names can optionally be encoded using the Values Escaping method, and is necessary if a name includes the `/` character. To encode a name in this way:

* Prepend the label with `U__`.
* Letters, numbers, and colons appear as-is.
* Convert single underscores to double underscores.
* For all other characters, use the UTF-8 codepoint as a hex integer, surrounded
  by underscores. So ` ` becomes `_20_` and a `.` becomes `_2e_`.

More information about text escaping can be found in the original UTF-8 [Proposal document](https://github.com/prometheus/proposals/blob/main/proposals/2023-08-21-utf8.md#text-escaping).

This example queries for all label values for the `http.status_code` label:

```json
$ curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values
{
   "status" : "success",
   "data" : [
      "200",
      "404"
   ]
}
```
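For readers following the escaping rules above, the same transformation can be produced and reversed programmatically. A minimal sketch, assuming the `EscapeName`/`UnescapeName` helpers and the `ValueEncodingEscaping` scheme from `github.com/prometheus/common/model` (the API handler change further down relies on `UnescapeName`):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// "http.status_code" -> "U__http_2e_status_code": the name gains a U__ prefix
	// and the '.' is replaced by its hex codepoint wrapped in underscores.
	escaped := model.EscapeName("http.status_code", model.ValueEncodingEscaping)
	fmt.Println(escaped)

	// Reversing the escaping recovers the original UTF-8 label name.
	fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping))
}
```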
@@ -126,10 +126,7 @@ type QueryEngine interface {
// QueryLogger is an interface that can be used to log all the queries logged
// by the engine.
type QueryLogger interface {
Error(msg string, args ...any)
Info(msg string, args ...any)
Debug(msg string, args ...any)
Warn(msg string, args ...any)
Log(context.Context, slog.Level, string, ...any)
With(args ...any)
Close() error
}
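The interface now has a single leveled entry point instead of per-level methods. As a rough sketch (not part of the change), any logger built on `log/slog` can satisfy the new shape with a thin adapter; the `slogQueryLogger` type below is illustrative:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

// slogQueryLogger is a hypothetical adapter that satisfies the narrowed
// QueryLogger shape (Log/With/Close) by delegating to a *slog.Logger.
type slogQueryLogger struct {
	logger *slog.Logger
}

func (l *slogQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
	l.logger.Log(ctx, level, msg, args...)
}

func (l *slogQueryLogger) With(args ...any) {
	l.logger = l.logger.With(args...)
}

func (l *slogQueryLogger) Close() error { return nil }

func main() {
	ql := &slogQueryLogger{logger: slog.New(slog.NewJSONHandler(os.Stderr, nil))}
	ql.With("component", "query-logger")
	ql.Log(context.Background(), slog.LevelInfo, "promql query logged", "query", "up")
}
```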
@@ -637,20 +634,20 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
// The step provided by the user is in seconds.
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond))
}
l.With("params", params)
f := []interface{}{"params", params}
if err != nil {
l.With("error", err)
f = append(f, "error", err)
}
l.With("stats", stats.NewQueryStats(q.Stats()))
f = append(f, "stats", stats.NewQueryStats(q.Stats()))
if span := trace.SpanFromContext(ctx); span != nil {
l.With("spanID", span.SpanContext().SpanID())
f = append(f, "spanID", span.SpanContext().SpanID())
}
if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]interface{}) {
l.With(k, v)
f = append(f, k, v)
}
}
l.Info("promql query logged")
l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...)
// TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors?
// ng.metrics.queryLogFailures.Inc()
// ng.logger.Error("can't log query", "err", err)
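The new side of this hunk collects the query metadata as alternating key/value pairs in a slice and emits them in one `Log` call instead of mutating the logger with repeated `With` calls. A small standalone sketch of that accumulation pattern (field values are made up for illustration):

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	l := slog.New(slog.NewJSONHandler(os.Stdout, nil))

	// Accumulate metadata as alternating key/value pairs...
	f := []any{"query", "up", "step", int64(15)}
	if spanID := "abc123"; spanID != "" { // illustrative optional field
		f = append(f, "spanID", spanID)
	}

	// ...and emit a single structured record at the chosen level.
	l.Log(context.Background(), slog.LevelInfo, "promql query logged", f...)
}
```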
@@ -1524,7 +1521,7 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr
// Avoid double counting samples when running a subquery, those samples will be counted in later stage.
ev.samplesStats = ev.samplesStats.NewChild()
val, ws := ev.eval(ctx, subq)
// But do incorporate the peak from the subquery
// But do incorporate the peak from the subquery.
samplesStats.UpdatePeakFromSubquery(ev.samplesStats)
ev.samplesStats = samplesStats
mat := val.(Matrix)
@@ -1989,7 +1986,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
// Start with the first timestamp after (ev.startTimestamp - offset - range)
// that is aligned with the step (multiple of 'newEv.interval').
newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval)
if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) {
if newEv.startTimestamp <= (ev.startTimestamp - offsetMillis - rangeMillis) {
newEv.startTimestamp += newEv.interval
}
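The `<` to `<=` change makes the subquery range left-open: a point sitting exactly at `start - offset - range` is no longer evaluated. A small sketch of the alignment arithmetic for the `metric[100s:25s] @ 300` case exercised in the tests below (values in milliseconds; the helper is illustrative, not engine code):

```go
package main

import "fmt"

func subqueryStart(evalStart, offsetMillis, rangeMillis, interval int64, leftOpen bool) int64 {
	// Align the first candidate timestamp to a multiple of the subquery step.
	start := interval * ((evalStart - offsetMillis - rangeMillis) / interval)
	boundary := evalStart - offsetMillis - rangeMillis
	if (leftOpen && start <= boundary) || (!leftOpen && start < boundary) {
		start += interval
	}
	return start
}

func main() {
	// metric[100s:25s] @ 300 -> boundary at 200000ms.
	fmt.Println(subqueryStart(300000, 0, 100000, 25000, false)) // old behaviour: 200000 (left boundary included)
	fmt.Println(subqueryStart(300000, 0, 100000, 25000, true))  // new behaviour: 225000 (left-open range)
}
```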
@@ -17,6 +17,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"math"
"sort"
"strings"
@@ -1426,23 +1427,23 @@ load 10s
},
{
// The peak samples in memory is during the first evaluation:
// - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated.
// - Subquery takes 20 samples, 10 for each bigmetric.
// - Result is calculated per series where the series samples is buffered, hence 10 more here.
// - The result of two series is added before the last series buffer is discarded, so 2 more here.
// Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series).
// Hence at peak it is 20 (subquery) + 10 (buffer of a series) + 2 (result from 2 series).
// The subquery samples and the buffer is discarded before duplicating.
Query: `rate(bigmetric[10s:1s] @ 10)`,
MaxSamples: 34,
MaxSamples: 32,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
},
{
// Here the reasoning is same as above. But LHS and RHS are done one after another.
// So while one of them takes 34 samples at peak, we need to hold the 2 sample
// So while one of them takes 32 samples at peak, we need to hold the 2 sample
// result of the other till then.
Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 36,
MaxSamples: 34,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
@@ -1450,28 +1451,28 @@ load 10s
{
// promql.Sample as above but with only 1 part as step invariant.
// Here the peak is caused by the non-step invariant part as it touches more time range.
// Hence at peak it is 2*21 (subquery from 0s to 20s)
// Hence at peak it is 2*20 (subquery from 0s to 20s)
// + 10 (buffer of a series per evaluation)
// + 6 (result from 2 series at 3 eval times).
Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 58,
MaxSamples: 56,
Start: time.Unix(10, 0),
End: time.Unix(20, 0),
Interval: 5 * time.Second,
},
{
// Nested subquery.
// We saw that innermost rate takes 34 samples which is still the peak
// We saw that innermost rate takes 32 samples which is still the peak
// since the other two subqueries just duplicate the result.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
MaxSamples: 34,
Query: `rate(rate(bigmetric[10:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
MaxSamples: 32,
Start: time.Unix(10, 0),
},
{
// Nested subquery.
// Now the outermost subquery produces more samples than inner most rate.
// Now the outermost subquery produces more samples than innermost rate.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
MaxSamples: 36,
MaxSamples: 34,
Start: time.Unix(10, 0),
},
}
@@ -1616,6 +1617,19 @@ load 1ms
}, {
query: "metric[100s:25s] @ 300",
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}},
Metric: lbls1,
},
promql.Series{
Floats: []promql.FPoint{{F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}},
Metric: lbls2,
},
},
}, {
query: "metric[100s1ms:25s] @ 300", // Add 1ms to the range to see the legacy behavior of the previous test.
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}},
@@ -1629,6 +1643,15 @@ load 1ms
}, {
query: "metric_neg[50s:25s] @ 0",
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 26, T: -25000}, {F: 1, T: 0}},
Metric: lblsneg,
},
},
}, {
query: "metric_neg[50s1ms:25s] @ 0", // Add 1ms to the range to see the legacy behavior of the previous test.
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}},
@@ -1638,6 +1661,15 @@ load 1ms
}, {
query: "metric_neg[50s:25s] @ -100",
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 126, T: -125000}, {F: 101, T: -100000}},
Metric: lblsneg,
},
},
}, {
query: "metric_neg[50s1ms:25s] @ -100", // Add 1ms to the range to see the legacy behavior of the previous test.
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}},
@@ -1645,7 +1677,7 @@ load 1ms
},
},
}, {
query: `metric_ms[100ms:25ms] @ 2.345`,
query: `metric_ms[101ms:25ms] @ 2.345`,
start: 100,
result: promql.Matrix{
promql.Series{
@@ -1830,7 +1862,7 @@ func TestSubquerySelector(t *testing.T) {
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}},
Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}},
Metric: labels.FromStrings("__name__", "metric"),
},
},
@@ -1877,6 +1909,20 @@ func TestSubquerySelector(t *testing.T) {
cases: []caseType{
{ // Normal selector.
Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}},
Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"),
},
},
nil,
},
Start: time.Unix(10020, 0),
},
{ // Normal selector. Add 1ms to the range to see the legacy behavior of the previous test.
Query: `http_requests{group=~"pro.*",instance="0"}[30s1ms:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
@@ -1919,6 +1965,36 @@ func TestSubquerySelector(t *testing.T) {
},
{
Query: `rate(http_requests[1m])[15s:5s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
DropName: true,
},
promql.Series{
Floats: []promql.FPoint{{F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
DropName: true,
},
promql.Series{
Floats: []promql.FPoint{{F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
DropName: true,
},
promql.Series{
Floats: []promql.FPoint{{F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
DropName: true,
},
},
nil,
},
Start: time.Unix(8000, 0),
},
{
Query: `rate(http_requests[1m])[15s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test.
Result: promql.Result{
nil,
promql.Matrix{
@@ -1949,6 +2025,35 @@ func TestSubquerySelector(t *testing.T) {
},
{
Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
nil,
},
Start: time.Unix(120, 0),
},
{
Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
nil,
},
Start: time.Unix(121, 0), // 1s later doesn't change the result.
},
{
// Add 1ms to the range to see the legacy behavior of the previous test.
Query: `sum(http_requests{group=~"pro.*"})[30s1ms:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
@@ -1963,6 +2068,20 @@ func TestSubquerySelector(t *testing.T) {
},
{
Query: `sum(http_requests)[40s:10s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
nil,
},
Start: time.Unix(120, 0),
},
{
Query: `sum(http_requests)[40s1ms:10s]`, // Add 1ms to the range to see the legacy behavior of the previous test.
Result: promql.Result{
nil,
promql.Matrix{
@@ -1977,6 +2096,21 @@ func TestSubquerySelector(t *testing.T) {
},
{
Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`,
Result: promql.Result{
nil,
promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}},
Metric: labels.EmptyLabels(),
},
},
nil,
},
Start: time.Unix(120, 0),
},
{
// Add 1ms to the range to see the legacy behavior of the previous test.
Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s1ms:5s]`,
Result: promql.Result{
nil,
promql.Matrix{
@@ -2034,31 +2168,10 @@ func (f *FakeQueryLogger) Close() error {
}

// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Info(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}

// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Error(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}

// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Warn(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}

// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Debug(msg string, args ...any) {
func (f *FakeQueryLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
// Test usage only really cares about existence of keyvals passed in
// via args, just append in the log message before handling the
// provided args and any embedded kvs added via `.With()` on f.attrs.
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
@@ -667,10 +667,16 @@ label_set_list : label_set_list COMMA label_set_item

label_set_item : IDENTIFIER EQL STRING
{ $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
| string_identifier EQL STRING
{ $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
| IDENTIFIER EQL error
{ yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
| string_identifier EQL error
{ yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
| IDENTIFIER error
{ yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}}
| string_identifier error
{ yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}}
| error
{ yylex.(*parser).unexpected("label set", "identifier or \"}\""); $$ = labels.Label{} }
;
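The added `string_identifier` alternatives allow a quoted string to stand in for a label name in a label set, which is what lets series descriptions such as `test_metric5{"host.name"="localhost"}` (used in the API test changes below) parse. A minimal sketch, assuming the exported `parser.ParseMetric` helper and UTF-8 name validation (the 3.0 default):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// A quoted UTF-8 label name is now accepted inside a label set.
	lbls, err := parser.ParseMetric(`test_metric5{"host.name"="localhost"}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(lbls.Get("host.name")) // localhost
}
```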
File diff suppressed because it is too large.
@@ -1421,7 +1421,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
sl.l.Debug("Scrape failed", "err", scrapeErr)
sl.scrapeFailureLoggerMtx.RLock()
if sl.scrapeFailureLogger != nil {
sl.scrapeFailureLogger.Error(scrapeErr.Error())
sl.scrapeFailureLogger.Log(context.Background(), slog.LevelError, scrapeErr.Error())
}
sl.scrapeFailureLoggerMtx.RUnlock()
if errc != nil {
@@ -14,6 +14,7 @@
package logging

import (
"context"
"fmt"
"log/slog"
"os"
@@ -57,26 +58,8 @@ func (l *JSONFileLogger) With(args ...any) {
l.logger = l.logger.With(args...)
}

// Info calls the `Info()` method on the underlying `log/slog.Logger` with the
// Log calls the `Log()` method on the underlying `log/slog.Logger` with the
// provided msg and args. It implements the promql.QueryLogger interface.
func (l *JSONFileLogger) Info(msg string, args ...any) {
l.logger.Info(msg, args...)
}

// Error calls the `Error()` method on the underlying `log/slog.Logger` with the
// provided msg and args. It implements the promql.QueryLogger interface.
func (l *JSONFileLogger) Error(msg string, args ...any) {
l.logger.Error(msg, args...)
}

// Debug calls the `Debug()` method on the underlying `log/slog.Logger` with the
// provided msg and args. It implements the promql.QueryLogger interface.
func (l *JSONFileLogger) Debug(msg string, args ...any) {
l.logger.Debug(msg, args...)
}

// Warn calls the `Warn()` method on the underlying `log/slog.Logger` with the
// provided msg and args. It implements the promql.QueryLogger interface.
func (l *JSONFileLogger) Warn(msg string, args ...any) {
l.logger.Warn(msg, args...)
func (l *JSONFileLogger) Log(ctx context.Context, level slog.Level, msg string, args ...any) {
l.logger.Log(ctx, level, msg, args...)
}
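A minimal usage sketch of the reworked file logger, mirroring the updated tests that follow (the file path is illustrative):

```go
package main

import (
	"context"
	"log/slog"

	"github.com/prometheus/prometheus/util/logging"
)

func main() {
	// NewJSONFileLogger appends JSON-formatted records to the given file.
	l, err := logging.NewJSONFileLogger("/tmp/query.log") // illustrative path
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Attached key/values persist across records; each record goes through
	// the single Log method required by promql.QueryLogger.
	l.With("component", "query-logger")
	l.Log(context.Background(), slog.LevelInfo, "promql query logged", "query", "up")
}
```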
@@ -14,6 +14,8 @@
package logging

import (
"context"
"log/slog"
"os"
"strings"
"testing"
@@ -34,7 +36,7 @@ func TestJSONFileLogger_basic(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")

l.Info("test", "hello", "world")
l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
require.NoError(t, err)
r := make([]byte, 1024)
_, err = f.Read(r)
@@ -64,14 +66,14 @@ func TestJSONFileLogger_parallel(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")

l.Info("test", "hello", "world")
l.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
require.NoError(t, err)

l2, err := NewJSONFileLogger(f.Name())
require.NoError(t, err)
require.NotNil(t, l, "logger can't be nil")

l2.Info("test", "hello", "world")
l2.Log(context.Background(), slog.LevelInfo, "test", "hello", "world")
require.NoError(t, err)

err = l.Close()
@@ -744,7 +744,12 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
ctx := r.Context()
name := route.Param(ctx, "name")

if !model.LabelNameRE.MatchString(name) {
if strings.HasPrefix(name, "U__") {
name = model.UnescapeName(name, model.ValueEncodingEscaping)
}

label := model.LabelName(name)
if !label.IsValid() {
return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil}
}
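The handler now accepts Values-Escaped label names: a `U__` prefix is unescaped before validation instead of rejecting anything that fails the legacy regex. A standalone sketch of that flow, using the same `model` helpers as the diff (the wrapper function itself is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/model"
)

// resolveLabelName mirrors the handler's flow: unescape U__-prefixed names,
// then validate the result as a label name.
func resolveLabelName(name string) (model.LabelName, error) {
	if strings.HasPrefix(name, "U__") {
		name = model.UnescapeName(name, model.ValueEncodingEscaping)
	}
	label := model.LabelName(name)
	if !label.IsValid() {
		return "", fmt.Errorf("invalid label name: %q", name)
	}
	return label, nil
}

func main() {
	// With UTF-8 name validation, the unescaped "http.status_code" is accepted.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(resolveLabelName("U__http_2e_status_code"))
}
```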
@@ -387,6 +387,8 @@ func TestEndpoints(t *testing.T) {
test_metric4{foo="bar", dup="1"} 1+0x100
test_metric4{foo="boo", dup="1"} 1+0x100
test_metric4{foo="boo"} 1+0x100
test_metric5{"host.name"="localhost"} 1+0x100
test_metric5{"junk\n{},=: chars"="bar"} 1+0x100
`)
t.Cleanup(func() { storage.Close() })
@@ -1117,6 +1119,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
metadata []targetMetadata
exemplars []exemplar.QueryResult
zeroFunc func(interface{})
nameValidationScheme model.ValidationScheme
}

rulesZeroFunc := func(i interface{}) {
@@ -3004,6 +3007,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"test_metric2",
"test_metric3",
"test_metric4",
"test_metric5",
},
},
{
@@ -3016,13 +3020,36 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"boo",
},
},
// Bad name parameter.
// Bad name parameter for legacy validation.
{
endpoint: api.labelValues,
params: map[string]string{
"name": "not!!!allowed",
"name": "host.name",
},
nameValidationScheme: model.LegacyValidation,
errType: errorBadData,
},
// Valid utf8 name parameter for utf8 validation.
{
endpoint: api.labelValues,
params: map[string]string{
"name": "host.name",
},
nameValidationScheme: model.UTF8Validation,
response: []string{
"localhost",
},
},
// Valid escaped utf8 name parameter for utf8 validation.
{
endpoint: api.labelValues,
params: map[string]string{
"name": "U__junk_0a__7b__7d__2c__3d_:_20__20_chars",
},
nameValidationScheme: model.UTF8Validation,
response: []string{
"bar",
},
errType: errorBadData,
},
// Start and end before LabelValues starts.
{
@@ -3258,15 +3285,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"name": "__name__",
},
query: url.Values{
"limit": []string{"4"},
"limit": []string{"5"},
},
responseLen: 4, // API does not specify which particular values will come back.
responseLen: 5, // API does not specify which particular values will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
// Label names.
{
endpoint: api.labelNames,
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Start and end before Label names starts.
{
@@ -3284,7 +3311,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"start": []string{"1"},
"end": []string{"100"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Start before Label names, end within Label names.
{
@@ -3293,7 +3320,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"start": []string{"-1"},
"end": []string{"10"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},

// Start before Label names starts, end after Label names ends.
@@ -3303,7 +3330,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"start": []string{"-1"},
"end": []string{"100000"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Start with bad data for Label names, end within Label names.
{
@@ -3321,7 +3348,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
"start": []string{"1"},
"end": []string{"1000000006"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Start and end after Label names ends.
{
@@ -3338,7 +3365,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
query: url.Values{
"start": []string{"4"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Only provide End within Label names, don't provide a start time.
{
@@ -3346,7 +3373,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
query: url.Values{
"end": []string{"20"},
},
response: []string{"__name__", "dup", "foo"},
response: []string{"__name__", "dup", "foo", "host.name", "junk\n{},=: chars"},
},
// Label names with bad matchers.
{
@@ -3414,9 +3441,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
{
endpoint: api.labelNames,
query: url.Values{
"limit": []string{"3"},
"limit": []string{"5"},
},
responseLen: 3, // API does not specify which particular values will come back.
responseLen: 5, // API does not specify which particular values will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
}...)
@@ -3452,6 +3479,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
ctx = route.WithParam(ctx, p, v)
}

model.NameValidationScheme = test.nameValidationScheme

req, err := request(method, test.query)
require.NoError(t, err)