For many-to-one matches, always copy labels from the "one" side.

This is a breaking change for everyone using the machine roles
labeling approach.
Brian Brazil 2016-04-21 19:03:10 +01:00
parent 768d09fd2a
commit d991f0cf47
4 changed files with 32 additions and 15 deletions
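For context, a short illustration of the change, using only queries that appear in the test changes of this commit: with on(...), group_left/group_right no longer require an explicit label list, and any labels named in group_left(...)/group_right(...) are always copied from the "one" side of the match, overwriting the value from the "many" side or removing it when the "one" side does not carry the label.

# Old test form: with on(...), group_left had to list its include labels explicitly.
node_cpu / on (instance,job) group_left (mode) sum by (instance,job)(node_cpu)

# New form: the include list is optional; output labels come from the "many" side.
node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)

# Machine-role style joins now keep the info metric on the "one" (right-hand) side,
# since group_left(role) takes "role" from that side.
node_var * on (instance) group_left (role) node_role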


@@ -942,6 +942,8 @@ func resultMetric(lhs, rhs metric.Metric, op itemType, matching *VectorMatching)
		// Included labels from the `group_x` modifier are taken from the "one"-side.
		if v, ok := rhs.Metric[ln]; ok {
			m[ln] = v
		} else {
			delete(m, ln)
		}
	}
	return metric.Metric{Metric: m, Copied: false}


@@ -479,8 +479,6 @@ func (p *parser) expr() Expr {
			}
			if p.peek().typ == itemLeftParen {
				vecMatching.Include = p.labels()
			} else if !vecMatching.Ignoring {
				p.errorf("must specify labels in INCLUDE clause when using ON")
			}
		}
	}


@@ -250,10 +250,6 @@ var testExpr = []struct {
		input:  "1 offset 1d",
		fail:   true,
		errMsg: "offset modifier must be preceded by an instant or range selector",
	}, {
		input:  "a - on(b) group_left d",
		fail:   true,
		errMsg: "must specify labels in INCLUDE clause when using ON",
	}, {
		input:  "a - on(b) ignoring(c) d",
		fail:   true,
@@ -504,6 +500,27 @@ var testExpr = []struct {
				On:   model.LabelNames{"test", "blub"},
			},
		},
	}, {
		input:    "foo * on(test,blub) group_left bar",
		expected: &BinaryExpr{
			Op: itemMUL,
			LHS: &VectorSelector{
				Name: "foo",
				LabelMatchers: metric.LabelMatchers{
					{Type: metric.Equal, Name: model.MetricNameLabel, Value: "foo"},
				},
			},
			RHS: &VectorSelector{
				Name: "bar",
				LabelMatchers: metric.LabelMatchers{
					{Type: metric.Equal, Name: model.MetricNameLabel, Value: "bar"},
				},
			},
			VectorMatching: &VectorMatching{
				Card: CardManyToOne,
				On:   model.LabelNames{"test", "blub"},
			},
		},
	}, {
		input:    "foo and on(test,blub) bar",
		expected: &BinaryExpr{
@@ -737,10 +754,6 @@ var testExpr = []struct {
		input:  "foo unless on(bar) group_right(baz) bar",
		fail:   true,
		errMsg: "no grouping allowed for \"unless\" operation",
	}, {
		input:  `http_requests{group="production"} / on(instance) group_left cpu_count{type="smp"}`,
		fail:   true,
		errMsg: "parse error at char 61: must specify labels in INCLUDE clause when using ON",
	}, {
		input:  `http_requests{group="production"} + on(instance) group_left(job,instance) cpu_count{type="smp"}`,
		fail:   true,

@@ -227,10 +227,10 @@ load 5m
  random{foo="bar"} 1

# Copy machine role to node variable.
eval instant at 5m node_role * on (instance,job) group_left (role) node_var
eval instant at 5m node_role * on (instance) group_right (role) node_var
  {instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * on (instance,job) group_right (role) node_role
eval instant at 5m node_var * on (instance) group_left (role) node_role
  {instance="abc",job="node",role="prometheus"} 2

eval instant at 5m node_var * ignoring (role) group_left (role) node_role
@@ -244,19 +244,23 @@ eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
  {instance="abc",job="node",mode="idle",role="prometheus"} 3
  {instance="abc",job="node",mode="user",role="prometheus"} 1

eval instant at 5m node_cpu * on (instance) group_left (role) node_role
  {instance="abc",job="node",mode="idle",role="prometheus"} 3
  {instance="abc",job="node",mode="user",role="prometheus"} 1

# Ratio of total.
eval instant at 5m node_cpu / on (instance,job) group_left (mode) sum by (instance,job)(node_cpu)
eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
  {instance="abc",job="node",mode="idle"} .75
  {instance="abc",job="node",mode="user"} .25
  {instance="def",job="node",mode="idle"} .80
  {instance="def",job="node",mode="user"} .20

eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left (mode) sum by (job)(node_cpu)
eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
  {job="node",mode="idle"} 0.7857142857142857
  {job="node",mode="user"} 0.21428571428571427

eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left (mode) sum by (job)(node_cpu))
eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
  {} 1.0