style: Replace else if cascades with switch

Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.

The exceptions that I have found in our codebase are just these two:

* The `else if` includes an additional statement before the next
  condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
  used. In this case, using `switch` would require labeling the `for`
  loop, which probably tips the balance (see the sketch below).
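
To make the second exception concrete, here is a minimal, made-up
sketch (the names `values` and `process` are placeholders, not code
from this commit): inside a `for` loop, a plain `break` in an `else if`
cascade leaves the loop, but inside a `switch` case it would only end
the `switch`, so the loop has to be labeled.

```go
	// `else if` version: a bare `break` exits the loop directly.
	for _, v := range values {
		if v < 0 {
			break
		} else if v == 0 {
			continue
		}
		process(v)
	}

	// `switch` version: the loop needs a label, because a bare `break`
	// inside a case would only terminate the `switch`.
loop:
	for _, v := range values {
		switch {
		case v < 0:
			break loop
		case v == 0:
			continue
		}
		process(v)
	}
```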

Why are `switch` statements more readable?

For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
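
To illustrate with a made-up snippet (not one of the changes in this
commit):

```go
	// `else if` version: only the first condition starts its own line;
	// the others hide behind `} else if `.
	if x < lo {
		y = lo
	} else if x > hi {
		y = hi
	} else {
		y = x
	}

	// `switch` version: every condition has the same alignment and the
	// cases read like a list.
	switch {
	case x < lo:
		y = lo
	case x > hi:
		y = hi
	default:
		y = x
	}
```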

I'm sure the aforementioned wise coders can list even more reasons.

In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would enforce in the CI.
But for that, there has to be a role model, so this commit eliminates
all `else if` occurrences, unless they are in autogenerated code or
fit one of the exceptions above.

Signed-off-by: beorn7 <beorn@grafana.com>
beorn7 2023-04-12 16:14:31 +02:00
parent c3c7d44d84
commit 5b53aa1108
44 changed files with 340 additions and 253 deletions

@@ -72,9 +72,11 @@ Loop:
 	if !startedOk {
 		t.Fatal("prometheus didn't start in the specified timeout")
 	}
-	if err := prom.Process.Kill(); err == nil {
+	switch err := prom.Process.Kill(); {
+	case err == nil:
 		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
-	} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
+	case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
+		// TODO: find a better way to detect when the process didn't exit as expected!
 		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
 	}
 }

@@ -403,14 +403,15 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 		return nil, nil, err
 	}
 	var block tsdb.BlockReader
-	if blockID != "" {
+	switch {
+	case blockID != "":
 		for _, b := range blocks {
 			if b.Meta().ULID.String() == blockID {
 				block = b
 				break
 			}
 		}
-	} else if len(blocks) > 0 {
+	case len(blocks) > 0:
 		block = blocks[len(blocks)-1]
 	}
 	if block == nil {

@@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
 	for _, lname := range conf.NameList(name) {
 		response, err := lookupFromAnyServer(lname, qtype, conf, logger)
-		if err != nil {
+		switch {
+		case err != nil:
 			// We can't go home yet, because a later name
 			// may give us a valid, successful answer. However
 			// we can no longer say "this name definitely doesn't
 			// exist", because we did not get that answer for
 			// at least one name.
 			allResponsesValid = false
-		} else if response.Rcode == dns.RcodeSuccess {
+		case response.Rcode == dns.RcodeSuccess:
 			// Outcome 1: GOLD!
 			return response, nil
 		}
 	}
 	if allResponsesValid {
-		// Outcome 2: everyone says NXDOMAIN, that's good enough for me
+		// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
 		return &dns.Msg{}, nil
 	}
 	// Outcome 3: boned.

@@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		err          error
 		ownNamespace string
 	)
-	if conf.KubeConfig != "" {
+	switch {
+	case conf.KubeConfig != "":
 		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
 		if err != nil {
 			return nil, err
 		}
-	} else if conf.APIServer.URL == nil {
+	case conf.APIServer.URL == nil:
 		// Use the Kubernetes provided pod service account
 		// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
 		kcfg, err = rest.InClusterConfig()
@@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		}
 		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
-	} else {
+	default:
 		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 		if err != nil {
 			return nil, err

@@ -250,19 +250,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 			continue
 		}
-		if detailedIP.Public && publicIPv4 == "" {
+		switch {
+		case detailedIP.Public && publicIPv4 == "":
 			publicIPv4 = detailedIP.Address
 			if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
 				publicIPv4RDNS = detailedIP.RDNS
 			}
-		} else if !detailedIP.Public && privateIPv4 == "" {
+		case !detailedIP.Public && privateIPv4 == "":
 			privateIPv4 = detailedIP.Address
 			if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
 				privateIPv4RDNS = detailedIP.RDNS
 			}
-		} else {
+		default:
 			extraIPs = append(extraIPs, detailedIP.Address)
 		}
 	}

@@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
 		return nil, err
 	}
-	if len(conf.AuthToken) > 0 {
+	switch {
+	case len(conf.AuthToken) > 0:
 		rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
-	} else if len(conf.AuthTokenFile) > 0 {
+	case len(conf.AuthTokenFile) > 0:
 		rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
 	}
 	if err != nil {
@@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet {
 	var labels []map[string]string
 	var prefix string
-	if len(app.Container.PortMappings) != 0 {
+	switch {
+	case len(app.Container.PortMappings) != 0:
 		// In Marathon 1.5.x the "container.docker.portMappings" object was moved
 		// to "container.portMappings".
 		ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet())
 		prefix = portMappingLabelPrefix
-	} else if len(app.Container.Docker.PortMappings) != 0 {
+	case len(app.Container.Docker.PortMappings) != 0:
 		// Prior to Marathon 1.5 the port mappings could be found at the path
 		// "container.docker.portMappings".
 		ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet())
 		prefix = portMappingLabelPrefix
-	} else if len(app.PortDefinitions) != 0 {
+	case len(app.PortDefinitions) != 0:
 		// PortDefinitions deprecates the "ports" array and can be used to specify
 		// a list of ports with metadata in case a mapping is not required.
 		ports = make([]uint32, len(app.PortDefinitions))

@@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample {
 	result := make([]prompb.Sample, 0, len(a)+len(b))
 	i, j := 0, 0
 	for i < len(a) && j < len(b) {
-		if a[i].Timestamp < b[j].Timestamp {
+		switch {
+		case a[i].Timestamp < b[j].Timestamp:
 			result = append(result, a[i])
 			i++
-		} else if a[i].Timestamp > b[j].Timestamp {
+		case a[i].Timestamp > b[j].Timestamp:
 			result = append(result, b[j])
 			j++
-		} else {
+		default:
 			result = append(result, a[i])
 			i++
 			j++

@@ -824,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into
 			origIdx += span.Offset
 		}
 		currIdx := i.targetIdx(origIdx)
-		if firstPass {
+		switch {
+		case firstPass:
 			i.currIdx = currIdx
 			firstPass = false
-		} else if currIdx != i.currIdx {
+		case currIdx != i.currIdx:
 			// Reached next bucket in targetSchema.
 			// Do not actually forward to the next bucket, but break out.
 			break mergeLoop

@@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 	b = b[:0]
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
+		default:
 			b = append(b, ls[i].Name...)
 			b = append(b, seps[0])
 			b = append(b, ls[i].Value...)
@@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
 	b.WriteByte(labelSep)
 	i, j := 0, 0
 	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
 			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
 			i++
-		} else {
+		default:
 			if b.Len() > 1 {
 				b.WriteByte(seps[0])
 			}

@@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
 }
 
 // NewInstantQuery returns an evaluation query for the given expression at the given time.
-func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
+func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
 	expr, err := parser.ParseExpr(qs)
 	if err != nil {
 		return nil, err
@@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
 
 // NewRangeQuery returns an evaluation query for the given time range and with
 // the resolution set by the interval.
-func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
+func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
 	expr, err := parser.ParseExpr(qs)
 	if err != nil {
 		return nil, err
@@ -1979,7 +1979,7 @@ func (ev *evaluator) matrixIterSlice(
 		// (b) the number of samples is relatively small.
 		// so a linear search will be as fast as a binary search.
 		var drop int
-		for drop = 0; histograms[drop].T < mint; drop++ {
+		for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive
 		}
 		ev.currentSamples -= drop
 		copy(histograms, histograms[drop:])
@@ -2096,13 +2096,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching,
 }
 
 func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
-	if matching.Card != parser.CardManyToMany {
+	switch {
+	case matching.Card != parser.CardManyToMany:
 		panic("set operations must only use many-to-many matching")
-	}
-	if len(lhs) == 0 { // Short-circuit.
+	case len(lhs) == 0: // Short-circuit.
 		enh.Out = append(enh.Out, rhs...)
 		return enh.Out
-	} else if len(rhs) == 0 {
+	case len(rhs) == 0:
 		enh.Out = append(enh.Out, lhs...)
 		return enh.Out
 	}
@@ -2221,13 +2221,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 				hl, hr = hr, hl
 			}
 			floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr)
-			if returnBool {
+			switch {
+			case returnBool:
 				if keep {
 					floatValue = 1.0
 				} else {
					floatValue = 0.0
 				}
-			} else if !keep {
+			case !keep:
 				continue
 			}
 			metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
@@ -2514,14 +2515,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 		if !ok {
 			var m labels.Labels
 			enh.resetBuilder(metric)
-			if without {
+			switch {
+			case without:
 				enh.lb.Del(grouping...)
 				enh.lb.Del(labels.MetricName)
 				m = enh.lb.Labels()
-			} else if len(grouping) > 0 {
+			case len(grouping) > 0:
 				enh.lb.Keep(grouping...)
 				m = enh.lb.Labels()
-			} else {
+			default:
 				m = labels.EmptyLabels()
 			}
 			newAgg := &groupedAggregation{
@@ -2530,9 +2532,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 				mean: s.F,
 				groupCount: 1,
 			}
-			if s.H == nil {
+			switch {
+			case s.H == nil:
 				newAgg.hasFloat = true
-			} else if op == parser.SUM {
+			case op == parser.SUM:
 				newAgg.histogramValue = s.H.Copy()
 				newAgg.hasHistogram = true
 			}
@@ -2542,9 +2545,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
 		inputVecLen := int64(len(vec))
 		resultSize := k
-		if k > inputVecLen {
+		switch {
+		case k > inputVecLen:
 			resultSize = inputVecLen
-		} else if k == 0 {
+		case k == 0:
 			resultSize = 1
 		}
 		switch op {
@@ -2637,12 +2641,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
 		case parser.TOPK:
 			// We build a heap of up to k elements, with the smallest element at heap[0].
-			if int64(len(group.heap)) < k {
+			switch {
+			case int64(len(group.heap)) < k:
 				heap.Push(&group.heap, &Sample{
 					F: s.F,
 					Metric: s.Metric,
 				})
-			} else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) {
+			case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
 				// This new element is bigger than the previous smallest element - overwrite that.
 				group.heap[0] = Sample{
 					F: s.F,
@@ -2655,12 +2660,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
 		case parser.BOTTOMK:
 			// We build a heap of up to k elements, with the biggest element at heap[0].
-			if int64(len(group.reverseHeap)) < k {
+			switch {
+			case int64(len(group.reverseHeap)) < k:
 				heap.Push(&group.reverseHeap, &Sample{
 					F: s.F,
 					Metric: s.Metric,
 				})
-			} else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) {
+			case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)):
 				// This new element is smaller than the previous biggest element - overwrite that.
 				group.reverseHeap[0] = Sample{
 					F: s.F,
@@ -2819,9 +2825,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
 func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
 	switch n := expr.(type) {
 	case *parser.VectorSelector:
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
 		}
 		return n.Timestamp != nil
@@ -2878,9 +2885,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
 		if isInvariant {
 			n.Expr = newStepInvariantExpr(n.Expr)
 		}
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
 			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
 		}
 		return n.Timestamp != nil

@@ -804,12 +804,14 @@ func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) V
 
 // === sgn(Vector parser.ValueTypeVector) Vector ===
 func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
 	return simpleFunc(vals, enh, func(v float64) float64 {
-		if v < 0 {
+		switch {
+		case v < 0:
 			return -1
-		} else if v > 0 {
+		case v > 0:
 			return 1
+		default:
+			return v
 		}
-		return v
 	})
 }

@@ -368,13 +368,14 @@ func Children(node Node) []Node {
 	case *AggregateExpr:
 		// While this does not look nice, it should avoid unnecessary allocations
 		// caused by slice resizing
-		if n.Expr == nil && n.Param == nil {
+		switch {
+		case n.Expr == nil && n.Param == nil:
 			return nil
-		} else if n.Expr == nil {
+		case n.Expr == nil:
 			return []Node{n.Param}
-		} else if n.Param == nil {
+		case n.Param == nil:
 			return []Node{n.Expr}
-		} else {
+		default:
 			return []Node{n.Expr, n.Param}
 		}
 	case *BinaryExpr:

@@ -347,9 +347,10 @@ func lexStatements(l *Lexer) stateFn {
 	switch r := l.next(); {
 	case r == eof:
-		if l.parenDepth != 0 {
+		switch {
+		case l.parenDepth != 0:
 			return l.errorf("unclosed left parenthesis")
-		} else if l.bracketOpen {
+		case l.bracketOpen:
 			return l.errorf("unclosed left bracket")
 		}
 		l.emit(EOF)
@@ -371,12 +372,13 @@ func lexStatements(l *Lexer) stateFn {
 	case r == '^':
 		l.emit(POW)
 	case r == '=':
-		if t := l.peek(); t == '=' {
+		switch t := l.peek(); t {
+		case '=':
 			l.next()
 			l.emit(EQLC)
-		} else if t == '~' {
+		case '~':
 			return l.errorf("unexpected character after '=': %q", t)
-		} else {
+		default:
 			l.emit(EQL)
 		}
 	case r == '!':
@@ -791,11 +793,12 @@ Loop:
 		default:
 			l.backup()
 			word := l.input[l.start:l.pos]
-			if kw, ok := key[strings.ToLower(word)]; ok {
+			switch kw, ok := key[strings.ToLower(word)]; {
+			case ok:
 				l.emit(kw)
-			} else if !strings.Contains(word, ":") {
+			case !strings.Contains(word, ":"):
 				l.emit(IDENTIFIER)
-			} else {
+			default:
 				l.emit(METRIC_IDENTIFIER)
 			}
 			break Loop

@@ -270,14 +270,15 @@ var errUnexpected = errors.New("unexpected error")
 
 // recover is the handler that turns panics into returns from the top level of Parse.
 func (p *parser) recover(errp *error) {
 	e := recover()
-	if _, ok := e.(runtime.Error); ok {
+	switch _, ok := e.(runtime.Error); {
+	case ok:
 		// Print the stack trace but do not inhibit the running application.
 		buf := make([]byte, 64<<10)
 		buf = buf[:runtime.Stack(buf, false)]
 		fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
 		*errp = errUnexpected
-	} else if e != nil {
+	case e != nil:
 		*errp = e.(error)
 	}
 }
@@ -518,14 +519,13 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 			p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types")
 		}
 
-		if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil {
+		switch {
+		case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil:
 			if len(n.VectorMatching.MatchingLabels) > 0 {
 				p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors")
 			}
 			n.VectorMatching = nil
-		} else {
-			// Both operands are Vectors.
-			if n.Op.IsSetOperator() {
+		case n.Op.IsSetOperator(): // Both operands are Vectors.
 			if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
 				p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
 			}
@@ -533,7 +533,6 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
 				p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
 			}
 		}
-		}
 
 		if (lt == ValueTypeScalar || rt == ValueTypeScalar) && n.Op.IsSetOperator() {
 			p.addParseErrf(n.PositionRange(), "set operator %q not allowed in binary scalar expression", n.Op)
@@ -708,9 +707,10 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
 	}
 
 	// it is already ensured by parseDuration func that there never will be a zero offset modifier
-	if *orgoffsetp != 0 {
+	switch {
+	case *orgoffsetp != 0:
 		p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
-	} else if orgoffsetp != nil {
+	case orgoffsetp != nil:
 		*orgoffsetp = offset
 	}

@@ -124,17 +124,19 @@ func (node *MatrixSelector) String() string {
 	// Copy the Vector selector before changing the offset
 	vecSelector := *node.VectorSelector.(*VectorSelector)
 	offset := ""
-	if vecSelector.OriginalOffset > time.Duration(0) {
+	switch {
+	case vecSelector.OriginalOffset > time.Duration(0):
 		offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
-	} else if vecSelector.OriginalOffset < time.Duration(0) {
+	case vecSelector.OriginalOffset < time.Duration(0):
 		offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset))
 	}
 	at := ""
-	if vecSelector.Timestamp != nil {
+	switch {
+	case vecSelector.Timestamp != nil:
 		at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0)
-	} else if vecSelector.StartOrEnd == START {
+	case vecSelector.StartOrEnd == START:
 		at = " @ start()"
-	} else if vecSelector.StartOrEnd == END {
+	case vecSelector.StartOrEnd == END:
 		at = " @ end()"
 	}
@@ -162,17 +164,19 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
 		step = model.Duration(node.Step).String()
 	}
 	offset := ""
-	if node.OriginalOffset > time.Duration(0) {
+	switch {
+	case node.OriginalOffset > time.Duration(0):
 		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
-	} else if node.OriginalOffset < time.Duration(0) {
+	case node.OriginalOffset < time.Duration(0):
 		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
 	}
 	at := ""
-	if node.Timestamp != nil {
+	switch {
+	case node.Timestamp != nil:
 		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
-	} else if node.StartOrEnd == START {
+	case node.StartOrEnd == START:
 		at = " @ start()"
-	} else if node.StartOrEnd == END {
+	case node.StartOrEnd == END:
 		at = " @ end()"
 	}
 	return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset)
@@ -207,17 +211,19 @@ func (node *VectorSelector) String() string {
 		labelStrings = append(labelStrings, matcher.String())
 	}
 	offset := ""
-	if node.OriginalOffset > time.Duration(0) {
+	switch {
+	case node.OriginalOffset > time.Duration(0):
 		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
-	} else if node.OriginalOffset < time.Duration(0) {
+	case node.OriginalOffset < time.Duration(0):
 		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
 	}
 	at := ""
-	if node.Timestamp != nil {
+	switch {
+	case node.Timestamp != nil:
 		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
-	} else if node.StartOrEnd == START {
+	case node.StartOrEnd == START:
 		at = " @ start()"
-	} else if node.StartOrEnd == END {
+	case node.StartOrEnd == END:
 		at = " @ end()"
 	}

@@ -169,11 +169,12 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
 		}
 	}
 	if bucket.Lower < 0 && bucket.Upper > 0 {
-		if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+		switch {
+		case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
 			// The result is in the zero bucket and the histogram has only
 			// positive buckets. So we consider 0 to be the lower bound.
 			bucket.Lower = 0
-		} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+		case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
 			// The result is in the zero bucket and the histogram has only
 			// negative buckets. So we consider 0 to be the upper bound.
 			bucket.Upper = 0
@@ -244,12 +245,13 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
 	for it.Next() {
 		b := it.At()
 		if b.Lower < 0 && b.Upper > 0 {
-			if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+			switch {
+			case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
 				// This is the zero bucket and the histogram has only
 				// positive buckets. So we consider 0 to be the lower
 				// bound.
 				b.Lower = 0
-			} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+			case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
 				// This is in the zero bucket and the histogram has only
 				// negative buckets. So we consider 0 to be the upper
 				// bound.

@@ -587,10 +587,10 @@ func TestAlertingRuleLimit(t *testing.T) {
 	evalTime := time.Unix(0, 0)
 
 	for _, test := range tests {
-		_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
-		if err != nil {
+		switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
+		case err != nil:
 			require.EqualError(t, err, test.err)
-		} else if test.err != "" {
+		case test.err != "":
 			t.Errorf("Expected errror %s, got none", test.err)
 		}
 	}

@@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) {
 		})
 
 		// Checking if we have restored it correctly.
-		if tst.noRestore {
+		switch {
+		case tst.noRestore:
 			require.Equal(t, tst.num, len(got))
 			for _, e := range got {
 				require.Equal(t, e.ActiveAt, restoreTime)
 			}
-		} else if tst.gracePeriod {
+		case tst.gracePeriod:
 			require.Equal(t, tst.num, len(got))
 			for _, e := range got {
 				require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
 			}
-		} else {
+		default:
 			exp := tst.alerts
 			require.Equal(t, len(exp), len(got))
 			sortAlerts(exp)

@@ -223,10 +223,10 @@ func TestRecordingRuleLimit(t *testing.T) {
 	evalTime := time.Unix(0, 0)
 
 	for _, test := range tests {
-		_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
-		if err != nil {
+		switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
+		case err != nil:
 			require.EqualError(t, err, test.err)
-		} else if test.err != "" {
+		case test.err != "":
 			t.Errorf("Expected error %s, got none", test.err)
 		}
 	}

@@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
 	// Cleanup and reload pool if the configuration has changed.
 	var failed bool
 	for name, sp := range m.scrapePools {
-		if cfg, ok := m.scrapeConfigs[name]; !ok {
+		switch cfg, ok := m.scrapeConfigs[name]; {
+		case !ok:
 			sp.stop()
 			delete(m.scrapePools, name)
-		} else if !reflect.DeepEqual(sp.config, cfg) {
+		case !reflect.DeepEqual(sp.config, cfg):
 			err := sp.reload(cfg)
 			if err != nil {
 				level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name)

@@ -503,9 +503,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 			// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
 			nonEmpty := false
 			t.LabelsRange(func(l labels.Label) { nonEmpty = true })
-			if nonEmpty {
+			switch {
+			case nonEmpty:
 				all = append(all, t)
-			} else if !t.discoveredLabels.IsEmpty() {
+			case !t.discoveredLabels.IsEmpty():
 				sp.droppedTargets = append(sp.droppedTargets, t)
 			}
 		}
@@ -946,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 	count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
 	c.metaMtx.Unlock()
 
-	if flushCache {
+	switch {
+	case flushCache:
 		c.successfulCount = count
-	} else if count > c.successfulCount*2+1000 {
+	case count > c.successfulCount*2+1000:
 		// If a target had varying labels in scrapes that ultimately failed,
 		// the caches would grow indefinitely. Force a flush when this happens.
 		// We use the heuristic that this is a doubling of the cache size

@@ -724,9 +724,10 @@ func TestScrapeLoopStop(t *testing.T) {
 	// All samples in a scrape must have the same timestamp.
 	var ts int64
 	for i, s := range appender.result {
-		if i%6 == 0 {
+		switch {
+		case i%6 == 0:
 			ts = s.t
-		} else if s.t != ts {
+		case s.t != ts:
 			t.Fatalf("Unexpected multiple timestamps within single scrape")
 		}
 	}
@@ -1139,10 +1140,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
 		numScrapes++
 
-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\n"))
 			return nil
-		} else if numScrapes == 5 {
+		case 5:
 			cancel()
 		}
 		return errors.New("scrape failed")
@@ -1200,13 +1202,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
 		numScrapes++
 
-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\n"))
 			return nil
-		} else if numScrapes == 2 {
+		case 2:
 			w.Write([]byte("7&-\n"))
 			return nil
-		} else if numScrapes == 3 {
+		case 3:
 			cancel()
 		}
 		return errors.New("scrape failed")
@@ -1265,14 +1268,15 @@ func TestScrapeLoopCache(t *testing.T) {
 	numScrapes := 0
 
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
-		if numScrapes == 1 || numScrapes == 2 {
+		switch numScrapes {
+		case 1, 2:
 			if _, ok := sl.cache.series["metric_a"]; !ok {
 				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
 			}
 			if _, ok := sl.cache.series["metric_b"]; !ok {
 				t.Errorf("metric_b missing from cache after scrape %d", numScrapes)
 			}
-		} else if numScrapes == 3 {
+		case 3:
 			if _, ok := sl.cache.series["metric_a"]; !ok {
 				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
 			}
@@ -1283,13 +1287,14 @@ func TestScrapeLoopCache(t *testing.T) {
 
 		numScrapes++
 
-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\nmetric_b 43\n"))
 			return nil
-		} else if numScrapes == 3 {
+		case 3:
 			w.Write([]byte("metric_a 44\n"))
 			return nil
-		} else if numScrapes == 4 {
+		case 4:
 			cancel()
 		}
 		return fmt.Errorf("scrape failed")
@@ -2280,11 +2285,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
 
 	go func() {
 		_, err := ts.scrape(ctx, io.Discard)
-		if err == nil {
+		switch {
+		case err == nil:
 			errc <- errors.New("Expected error but got nil")
-		} else if ctx.Err() != context.Canceled {
+		case ctx.Err() != context.Canceled:
 			errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err())
-		} else {
+		default:
 			close(errc)
 		}
 	}()

@@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) {
 
 	for _, appender := range f.secondaries {
 		rollbackErr := appender.Rollback()
-		if err == nil {
+		switch {
+		case err == nil:
 			err = rollbackErr
-		} else if rollbackErr != nil {
+		case rollbackErr != nil:
 			level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
 		}
 	}

@@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string {
 	res := make([]string, 0, maxl*10/9)
 
 	for len(a) > 0 && len(b) > 0 {
-		if a[0] == b[0] {
+		switch {
+		case a[0] == b[0]:
 			res = append(res, a[0])
 			a, b = a[1:], b[1:]
-		} else if a[0] < b[0] {
+		case a[0] < b[0]:
 			res = append(res, a[0])
 			a = a[1:]
-		} else {
+		default:
 			res = append(res, b[0])
 			b = b[1:]
 		}

@@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
 	result := make([]prompb.Label, 0, len(primary)+len(secondary))
 	i, j := 0, 0
 	for i < len(primary) && j < len(secondary) {
-		if primary[i].Name < secondary[j].Name {
+		switch {
+		case primary[i].Name < secondary[j].Name:
 			result = append(result, primary[i])
 			i++
-		} else if primary[i].Name > secondary[j].Name {
+		case primary[i].Name > secondary[j].Name:
 			result = append(result, secondary[j])
 			j++
-		} else {
+		default:
 			result = append(result, primary[i])
 			i++
 			j++
@@ -429,7 +430,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
 		return c.series.histograms[n+c.histogramsCur].Timestamp >= t
 	})
 
-	if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) {
+	switch {
+	case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
 		// If float samples and histogram samples have overlapping timestamps prefer the float samples.
 		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
 			c.curValType = chunkenc.ValFloat
@@ -445,9 +447,9 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
 				c.floatsCur--
 			}
 		}
-	} else if c.floatsCur < len(c.series.floats) {
+	case c.floatsCur < len(c.series.floats):
 		c.curValType = chunkenc.ValFloat
-	} else if c.histogramsCur < len(c.series.histograms) {
+	case c.histogramsCur < len(c.series.histograms):
 		c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
 	}
@@ -515,18 +517,19 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
 	}
 
 	c.curValType = chunkenc.ValNone
-	if peekFloatTS < peekHistTS {
+	switch {
+	case peekFloatTS < peekHistTS:
 		c.floatsCur++
 		c.curValType = chunkenc.ValFloat
-	} else if peekHistTS < peekFloatTS {
+	case peekHistTS < peekFloatTS:
 		c.histogramsCur++
 		c.curValType = chunkenc.ValHistogram
-	} else if peekFloatTS == noTS && peekHistTS == noTS {
+	case peekFloatTS == noTS && peekHistTS == noTS:
 		// This only happens when the iterator is exhausted; we set the cursors off the end to prevent
 		// Seek() from returning anything afterwards.
 		c.floatsCur = len(c.series.floats)
 		c.histogramsCur = len(c.series.histograms)
-	} else {
+	default:
 		// Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms
 		// anyway otherwise the histogram sample will get selected on the next call to Next().
 		c.floatsCur++

@@ -55,9 +55,10 @@ func (r *ewmaRate) tick() {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
 
-	if r.init {
+	switch {
+	case r.init:
 		r.lastRate += r.alpha * (instantRate - r.lastRate)
-	} else if newEvents > 0 {
+	case newEvents > 0:
 		r.init = true
 		r.lastRate = instantRate
 	}

@@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int {
 		return t.numShards
 	}

-	if numShards > t.cfg.MaxShards {
+	switch {
+	case numShards > t.cfg.MaxShards:
 		numShards = t.cfg.MaxShards
-	} else if numShards < t.cfg.MinShards {
+	case numShards < t.cfg.MinShards:
 		numShards = t.cfg.MinShards
 	}
 	return numShards
@@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
 			}
 
 			sleepDuration = backoff
-			if backoffErr.retryAfter > 0 {
+			switch {
+			case backoffErr.retryAfter > 0:
 				sleepDuration = backoffErr.retryAfter
 				level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration)
-			} else if backoffErr.retryAfter < 0 {
+			case backoffErr.retryAfter < 0:
 				level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism")
 			}

@@ -951,7 +951,8 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
 		return 0, storage.ErrOutOfOrderSample
 	}
 
-	if h != nil {
+	switch {
+	case h != nil:
 		// NOTE: always modify pendingHistograms and histogramSeries together
 		a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
 			Ref: series.ref,
@@ -959,7 +960,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
 			H: h,
 		})
 		a.histogramSeries = append(a.histogramSeries, series)
-	} else if fh != nil {
+	case fh != nil:
 		// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
 		a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
 			Ref: series.ref,

@@ -164,14 +164,15 @@ func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())

-	if num == 0 {
+	switch num {
+	case 0:
 		buf := make([]byte, binary.MaxVarintLen64)
 		for _, b := range buf[:binary.PutVarint(buf, t)] {
 			a.b.writeByte(b)
 		}
 		a.b.writeBits(math.Float64bits(v), 64)
-	} else if num == 1 {
+	case 1:
 		tDelta = uint64(t - a.t)

 		buf := make([]byte, binary.MaxVarintLen64)
@@ -181,7 +182,7 @@ func (a *xorAppender) Append(t int64, v float64) {
 		a.writeVDelta(v)

-	} else {
+	default:
 		tDelta = uint64(t - a.t)
 		dod := int64(tDelta - a.tDelta)

@@ -999,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error {
 	cdm.readPathMtx.RLock()
 	lastSeq := 0
 	for seg := range cdm.mmappedChunkFiles {
-		if seg >= cerr.FileIndex {
+		switch {
+		case seg >= cerr.FileIndex:
 			segs = append(segs, seg)
-		} else if seg > lastSeq {
+		case seg > lastSeq:
 			lastSeq = seg
 		}
 	}

@@ -963,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
 	// Create WBL if it was not present and if OOO is enabled with WAL enabled.
 	var wblog *wlog.WL
 	var err error
-	if db.head.wbl != nil {
+	switch {
+	case db.head.wbl != nil:
 		// The existing WBL from the disk might have been replayed while OOO was disabled.
 		wblog = db.head.wbl
-	} else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 {
+	case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0:
 		segmentSize := wlog.DefaultSegmentSize
 		// Wal is set to a custom size.
 		if db.opts.WALSegmentSize > 0 {
@@ -1532,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
 		}
 
 		toDelete := filepath.Join(db.dir, ulid.String())
-		if _, err := os.Stat(toDelete); os.IsNotExist(err) {
+		switch _, err := os.Stat(toDelete); {
+		case os.IsNotExist(err):
 			// Noop.
 			continue
-		} else if err != nil {
+		case err != nil:
 			return errors.Wrapf(err, "stat dir %v", toDelete)
 		}

@@ -344,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	}
 
 	if value.IsStaleNaN(v) {
-		if s.lastHistogramValue != nil {
+		switch {
+		case s.lastHistogramValue != nil:
 			return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
-		} else if s.lastFloatHistogramValue != nil {
+		case s.lastFloatHistogramValue != nil:
 			return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
 		}
 	}
@@ -552,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 			return 0, err
 		}
 		if created {
-			if h != nil {
+			switch {
+			case h != nil:
 				s.lastHistogramValue = &histogram.Histogram{}
-			} else if fh != nil {
+			case fh != nil:
 				s.lastFloatHistogramValue = &histogram.FloatHistogram{}
 			}
 			a.series = append(a.series, record.RefSeries{
@@ -564,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 		}
 	}
 
-	if h != nil {
+	switch {
+	case h != nil:
 		s.Lock()
 		if err := s.appendableHistogram(t, h); err != nil {
 			s.Unlock()
@@ -581,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 			H: h,
 		})
 		a.histogramSeries = append(a.histogramSeries, s)
-	} else if fh != nil {
+	case fh != nil:
 		s.Lock()
 		if err := s.appendableFloatHistogram(t, fh); err != nil {
 			s.Unlock()
@@ -938,7 +941,10 @@ func (a *headAppender) Commit() (err error) {
 
 		var ok, chunkCreated bool
 
-		if err == nil && oooSample {
+		switch {
+		case err != nil:
+			// Do nothing here.
+		case oooSample:
 			// Sample is OOO and OOO handling is enabled
 			// and the delta is within the OOO tolerance.
 			var mmapRef chunks.ChunkDiskMapperRef
@@ -976,7 +982,7 @@ func (a *headAppender) Commit() (err error) {
 				// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
 				samplesAppended--
 			}
-		} else if err == nil {
+		default:
 			ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange)
 			if ok {
 				if s.T < inOrderMint {
@@ -1177,14 +1183,15 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 		app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
 	}
 	// We have 3 cases here
-	// - !okToAppend -> We need to cut a new chunk.
+	// - !okToAppend or counterReset -> We need to cut a new chunk.
 	// - okToAppend but we have inserts → Existing chunk needs
 	//   recoding before we can append our histogram.
 	// - okToAppend and no inserts → Chunk is ready to support our histogram.
-	if !okToAppend || counterReset {
+	switch {
+	case !okToAppend || counterReset:
 		c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
 		chunkCreated = true
-	} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+	case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
 		// New buckets have appeared. We need to recode all
 		// prior histogram samples within the chunk before we
 		// can process this one.
@@ -1270,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 		app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
 	}
 	// We have 3 cases here
-	// - !okToAppend -> We need to cut a new chunk.
+	// - !okToAppend or counterReset -> We need to cut a new chunk.
 	// - okToAppend but we have inserts → Existing chunk needs
 	//   recoding before we can append our histogram.
 	// - okToAppend and no inserts → Chunk is ready to support our histogram.
-	if !okToAppend || counterReset {
+	switch {
+	case !okToAppend || counterReset:
 		c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
 		chunkCreated = true
-	} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+	case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
 		// New buckets have appeared. We need to recode all
 		// prior histogram samples within the chunk before we
 		// can process this one.

@ -424,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
break break
} }
if chunkRef == meta.OOOLastRef { switch {
case chunkRef == meta.OOOLastRef:
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{ meta: chunks.Meta{
MinTime: meta.OOOLastMinTime, MinTime: meta.OOOLastMinTime,
@ -435,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
origMinT: c.minTime, origMinT: c.minTime,
origMaxT: c.maxTime, origMaxT: c.maxTime,
}) })
} else if c.OverlapsClosedInterval(mint, maxt) { case c.OverlapsClosedInterval(mint, maxt):
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{ meta: chunks.Meta{
MinTime: c.minTime, MinTime: c.minTime,
@ -594,13 +595,15 @@ type boundedIterator struct {
func (b boundedIterator) Next() chunkenc.ValueType { func (b boundedIterator) Next() chunkenc.ValueType {
for b.Iterator.Next() == chunkenc.ValFloat { for b.Iterator.Next() == chunkenc.ValFloat {
t, _ := b.Iterator.At() t, _ := b.Iterator.At()
if t < b.minT { switch {
case t < b.minT:
continue continue
} else if t > b.maxT { case t > b.maxT:
return chunkenc.ValNone return chunkenc.ValNone
} default:
return chunkenc.ValFloat return chunkenc.ValFloat
} }
}
return chunkenc.ValNone return chunkenc.ValNone
} }
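
A subtlety worth calling out in the `boundedIterator` rewrite: `continue` inside a `case` still targets the enclosing `for` loop, whereas an explicit `break` would only leave the `switch`. A sketch with toy names (values are assumed time-ordered, as in the iterator above):

```go
package main

import "fmt"

// firstInRange returns the first value within [minV, maxV], scanning in order.
func firstInRange(vals []int, minV, maxV int) (int, bool) {
	for _, v := range vals {
		switch {
		case v < minV:
			continue // too small: continues the for loop, not the switch
		case v > maxV:
			return 0, false // past the upper bound: stop scanning
		default:
			return v, true // within bounds
		}
	}
	return 0, false
}

func main() {
	fmt.Println(firstInRange([]int{1, 3, 5, 9}, 4, 7)) // 5 true
}
```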

View file

@ -2960,10 +2960,11 @@ func TestAppendHistogram(t *testing.T) {
actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms)) actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms))
actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms)) actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms))
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() { for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
if typ == chunkenc.ValHistogram { switch typ {
case chunkenc.ValHistogram:
ts, h := it.AtHistogram() ts, h := it.AtHistogram()
actHistograms = append(actHistograms, sample{t: ts, h: h}) actHistograms = append(actHistograms, sample{t: ts, h: h})
} else if typ == chunkenc.ValFloatHistogram { case chunkenc.ValFloatHistogram:
ts, fh := it.AtFloatHistogram() ts, fh := it.AtFloatHistogram()
actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh}) actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
} }
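
Where one expression is compared against several constants, a value switch (`switch typ { case ...: }`) replaces the boolean chain. The same shape in isolation (stand-in type and constants, not the chunkenc API):

```go
package main

import "fmt"

type valueType int

const (
	valNone valueType = iota
	valHistogram
	valFloatHistogram
)

func bucket(typ valueType) string {
	switch typ { // compares typ against each case value; no fallthrough
	case valHistogram:
		return "integer histogram"
	case valFloatHistogram:
		return "float histogram"
	default:
		return "something else"
	}
}

func main() {
	fmt.Println(bucket(valFloatHistogram)) // float histogram
}
```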
@ -3565,14 +3566,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
for i, eh := range expHistograms { for i, eh := range expHistograms {
ah := actHistograms[i] ah := actHistograms[i]
if floatHistogram { if floatHistogram {
if value.IsStaleNaN(eh.fh.Sum) { switch {
case value.IsStaleNaN(eh.fh.Sum):
actNumStale++ actNumStale++
require.True(t, value.IsStaleNaN(ah.fh.Sum)) require.True(t, value.IsStaleNaN(ah.fh.Sum))
// To make require.Equal work. // To make require.Equal work.
ah.fh.Sum = 0 ah.fh.Sum = 0
eh.fh = eh.fh.Copy() eh.fh = eh.fh.Copy()
eh.fh.Sum = 0 eh.fh.Sum = 0
} else if i > 0 { case i > 0:
prev := expHistograms[i-1] prev := expHistograms[i-1]
if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) { if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
eh.fh.CounterResetHint = histogram.UnknownCounterReset eh.fh.CounterResetHint = histogram.UnknownCounterReset
@ -3580,14 +3582,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
} }
require.Equal(t, eh, ah) require.Equal(t, eh, ah)
} else { } else {
if value.IsStaleNaN(eh.h.Sum) { switch {
case value.IsStaleNaN(eh.h.Sum):
actNumStale++ actNumStale++
require.True(t, value.IsStaleNaN(ah.h.Sum)) require.True(t, value.IsStaleNaN(ah.h.Sum))
// To make require.Equal work. // To make require.Equal work.
ah.h.Sum = 0 ah.h.Sum = 0
eh.h = eh.h.Copy() eh.h = eh.h.Copy()
eh.h.Sum = 0 eh.h.Sum = 0
} else if i > 0 { case i > 0:
prev := expHistograms[i-1] prev := expHistograms[i-1]
if prev.h == nil || value.IsStaleNaN(prev.h.Sum) { if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
eh.h.CounterResetHint = histogram.UnknownCounterReset eh.h.CounterResetHint = histogram.UnknownCounterReset
@ -4488,19 +4491,19 @@ func TestHistogramValidation(t *testing.T) {
for testName, tc := range tests { for testName, tc := range tests {
t.Run(testName, func(t *testing.T) { t.Run(testName, func(t *testing.T) {
err := ValidateHistogram(tc.h) switch err := ValidateHistogram(tc.h); {
if tc.errMsg != "" { case tc.errMsg != "":
require.ErrorContains(t, err, tc.errMsg) require.ErrorContains(t, err, tc.errMsg)
} else { default:
require.NoError(t, err) require.NoError(t, err)
} }
err = ValidateFloatHistogram(tc.h.ToFloat()) switch err := ValidateFloatHistogram(tc.h.ToFloat()); {
if tc.errMsgFloat != "" { case tc.errMsgFloat != "":
require.ErrorContains(t, err, tc.errMsgFloat) require.ErrorContains(t, err, tc.errMsgFloat)
} else if tc.errMsg != "" { case tc.errMsg != "":
require.ErrorContains(t, err, tc.errMsg) require.ErrorContains(t, err, tc.errMsg)
} else { default:
require.NoError(t, err) require.NoError(t, err)
} }
}) })
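
This test also uses the init-statement form, `switch err := f(); { ... }`, which scopes `err` to the switch exactly like `if err := f(); cond {` does. A minimal sketch with made-up names:

```go
package main

import (
	"errors"
	"fmt"
)

func validate(n int) error {
	if n < 0 {
		return errors.New("negative count")
	}
	return nil
}

func main() {
	// err exists only within the switch, mirroring the if-with-init idiom.
	switch err := validate(-1); {
	case err != nil:
		fmt.Println("invalid:", err)
	default:
		fmt.Println("ok")
	}
}
```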

View file

@ -565,14 +565,13 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
for _, it := range p { for _, it := range p {
// NOTE: mergedPostings struct requires the user to issue an initial Next. // NOTE: mergedPostings struct requires the user to issue an initial Next.
if it.Next() { switch {
case it.Next():
ph = append(ph, it) ph = append(ph, it)
} else { case it.Err() != nil:
if it.Err() != nil {
return &mergedPostings{err: it.Err()}, true return &mergedPostings{err: it.Err()}, true
} }
} }
}
if len(ph) == 0 { if len(ph) == 0 {
return nil, false return nil, false
@ -704,16 +703,16 @@ func (rp *removedPostings) Next() bool {
return true return true
} }
fcur, rcur := rp.full.At(), rp.remove.At() switch fcur, rcur := rp.full.At(), rp.remove.At(); {
if fcur < rcur { case fcur < rcur:
rp.cur = fcur rp.cur = fcur
rp.fok = rp.full.Next() rp.fok = rp.full.Next()
return true return true
} else if rcur < fcur { case rcur < fcur:
// Forward the remove postings to the right position. // Forward the remove postings to the right position.
rp.rok = rp.remove.Seek(fcur) rp.rok = rp.remove.Seek(fcur)
} else { default:
// Skip the current posting. // Skip the current posting.
rp.fok = rp.full.Next() rp.fok = rp.full.Next()
} }
@ -848,9 +847,10 @@ func (it *bigEndianPostings) Err() error {
func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) { func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
h := make(postingsWithIndexHeap, 0, len(candidates)) h := make(postingsWithIndexHeap, 0, len(candidates))
for idx, it := range candidates { for idx, it := range candidates {
if it.Next() { switch {
case it.Next():
h = append(h, postingsWithIndex{index: idx, p: it}) h = append(h, postingsWithIndex{index: idx, p: it})
} else if it.Err() != nil { case it.Err() != nil:
return nil, it.Err() return nil, it.Err()
} }
} }
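
`case` expressions are evaluated strictly in order, so the rewrite above stays correct even though `it.Next()` has side effects: `it.Err()` is only consulted once `it.Next()` has returned false. A sketch with a toy iterator (not the `index.Postings` interface):

```go
package main

import (
	"errors"
	"fmt"
)

type iter struct {
	vals []int
	pos  int
	err  error
}

func (it *iter) Next() bool { it.pos++; return it.pos <= len(it.vals) } // side effect: advances
func (it *iter) Err() error { return it.err }

func main() {
	its := []*iter{{vals: []int{1}}, {err: errors.New("broken")}}
	for i, it := range its {
		switch {
		case it.Next(): // evaluated first; advances the iterator
			fmt.Println("iterator", i, "has a value")
		case it.Err() != nil: // only reached when Next() returned false
			fmt.Println("iterator", i, "failed:", it.Err())
		}
	}
}
```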

View file

@ -123,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
} }
} }
// There is nothing to do if we did not collect any chunk // There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 { if len(tmpChks) == 0 {
return nil return nil
} }
@ -136,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// chunks Meta the first chunk that overlaps with others. // chunks Meta the first chunk that overlaps with others.
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) // Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
// to return chunk Metas for chunk 5 and chunk 6 // return chunk Metas for chunk 5 and chunk 6.
*chks = append(*chks, tmpChks[0]) *chks = append(*chks, tmpChks[0])
maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
for _, c := range tmpChks[1:] { for _, c := range tmpChks[1:] {
if c.MinTime > maxTime { switch {
case c.MinTime > maxTime:
*chks = append(*chks, c) *chks = append(*chks, c)
maxTime = c.MaxTime maxTime = c.MaxTime
} else if c.MaxTime > maxTime { case c.MaxTime > maxTime:
maxTime = c.MaxTime maxTime = c.MaxTime
(*chks)[len(*chks)-1].MaxTime = c.MaxTime (*chks)[len(*chks)-1].MaxTime = c.MaxTime
} }
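
To make the merge loop concrete: with chunks sorted by MinTime, (100, 200) and (150, 250) overlap and collapse into (100, 250), while (500, 600) starts a new entry. A runnable sketch of the same two-case walk over plain structs (not `chunks.Meta`):

```go
package main

import "fmt"

type span struct{ minT, maxT int64 }

// mergeOverlapping assumes the input is sorted by minT and collapses
// overlapping spans into one entry, extending its maxT as needed.
func mergeOverlapping(in []span) []span {
	if len(in) == 0 {
		return nil
	}
	out := []span{in[0]}
	maxT := in[0].maxT // max time of the span currently being merged into
	for _, s := range in[1:] {
		switch {
		case s.minT > maxT: // no overlap: start a new output span
			out = append(out, s)
			maxT = s.maxT
		case s.maxT > maxT: // overlap that extends the current span
			maxT = s.maxT
			out[len(out)-1].maxT = s.maxT
		}
	}
	return out
}

func main() {
	fmt.Println(mergeOverlapping([]span{{100, 200}, {150, 250}, {500, 600}}))
	// [{100 250} {500 600}]
}
```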

View file

@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
} }
for _, m := range ms { for _, m := range ms {
if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least. switch {
case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least.
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(k, v) allPostings, err := ix.Postings(k, v)
if err != nil { if err != nil {
return nil, err return nil, err
} }
its = append(its, allPostings) its = append(its, allPostings)
} else if labelMustBeSet[m.Name] { case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter. // If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("") matchesEmpty := m.Matches("")
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
if isNot && matchesEmpty { // l!="foo" switch {
case isNot && matchesEmpty: // l!="foo"
// If the label can't be empty and is a Not and the inner matcher // If the label can't be empty and is a Not and the inner matcher
// doesn't match empty, then subtract it out at the end. // doesn't match empty, then subtract it out at the end.
inverse, err := m.Inverse() inverse, err := m.Inverse()
@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
return nil, err return nil, err
} }
notIts = append(notIts, it) notIts = append(notIts, it)
} else if isNot && !matchesEmpty { // l!="" case isNot && !matchesEmpty: // l!=""
// If the label can't be empty and is a Not, but the inner matcher can // If the label can't be empty and is a Not, but the inner matcher can
// be empty we need to use inversePostingsForMatcher. // be empty we need to use inversePostingsForMatcher.
inverse, err := m.Inverse() inverse, err := m.Inverse()
@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
} }
its = append(its, it) its = append(its, it)
} else { // l="a" default: // l="a"
// Non-Not matcher, use normal postingsForMatcher. // Non-Not matcher, use normal postingsForMatcher.
it, err := postingsForMatcher(ix, m) it, err := postingsForMatcher(ix, m)
if err != nil { if err != nil {
@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
} }
its = append(its, it) its = append(its, it)
} }
} else { // l="" default: // l=""
// If the matchers for a labelname selects an empty value, it selects all // If the matchers for a labelname selects an empty value, it selects all
// the series which don't have the label name set too. See: // the series which don't have the label name set too. See:
// https://github.com/prometheus/prometheus/issues/3575 and // https://github.com/prometheus/prometheus/issues/3575 and
@ -966,23 +968,24 @@ func (m *mergedStringIter) Next() bool {
return false return false
} }
if !m.aok { switch {
case !m.aok:
m.cur = m.b.At() m.cur = m.b.At()
m.bok = m.b.Next() m.bok = m.b.Next()
m.err = m.b.Err() m.err = m.b.Err()
} else if !m.bok { case !m.bok:
m.cur = m.a.At() m.cur = m.a.At()
m.aok = m.a.Next() m.aok = m.a.Next()
m.err = m.a.Err() m.err = m.a.Err()
} else if m.b.At() > m.a.At() { case m.b.At() > m.a.At():
m.cur = m.a.At() m.cur = m.a.At()
m.aok = m.a.Next() m.aok = m.a.Next()
m.err = m.a.Err() m.err = m.a.Err()
} else if m.a.At() > m.b.At() { case m.a.At() > m.b.At():
m.cur = m.b.At() m.cur = m.b.At()
m.bok = m.b.Next() m.bok = m.b.Next()
m.err = m.b.Err() m.err = m.b.Err()
} else { // Equal. default: // Equal.
m.cur = m.b.At() m.cur = m.b.At()
m.aok = m.a.Next() m.aok = m.a.Next()
m.err = m.a.Err() m.err = m.a.Err()
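
The `mergedStringIter` cases lean on the same first-true-case-wins rule: exhaustion of either side is checked before the ordering comparisons, and the final `default` covers equality. The same shape, sketched over two sorted slices:

```go
package main

import "fmt"

// mergeSorted merges two sorted string slices, dropping duplicates across them.
func mergeSorted(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) || j < len(b) {
		switch {
		case i == len(a): // a exhausted: take from b
			out = append(out, b[j])
			j++
		case j == len(b): // b exhausted: take from a
			out = append(out, a[i])
			i++
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case b[j] < a[i]:
			out = append(out, b[j])
			j++
		default: // equal: emit once, advance both
			out = append(out, a[i])
			i++
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(mergeSorted([]string{"a", "c"}, []string{"b", "c", "d"}))
	// [a b c d]
}
```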

View file

@ -190,9 +190,10 @@ type Stone struct {
func ReadTombstones(dir string) (Reader, int64, error) { func ReadTombstones(dir string) (Reader, int64, error) {
b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename)) b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename))
if os.IsNotExist(err) { switch {
case os.IsNotExist(err):
return NewMemTombstones(), 0, nil return NewMemTombstones(), 0, nil
} else if err != nil { case err != nil:
return nil, 0, err return nil, 0, err
} }
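
Case order carries meaning here: the more specific `os.IsNotExist(err)` must come before the generic `err != nil`. A minimal standalone sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	switch _, err := os.ReadFile("does-not-exist"); {
	case os.IsNotExist(err): // more specific condition first
		fmt.Println("no file yet, starting empty")
	case err != nil: // any other read error is fatal
		fmt.Println("read failed:", err)
	default:
		fmt.Println("loaded file")
	}
}
```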

View file

@ -522,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
} }
}() }()
if n, err := f.Read(metab); err != nil { switch n, err := f.Read(metab); {
case err != nil:
return nil, errors.Wrapf(err, "validate meta %q", f.Name()) return nil, errors.Wrapf(err, "validate meta %q", f.Name())
} else if n != 8 { case n != 8:
return nil, errors.Errorf("invalid header size %d in %q", n, f.Name()) return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
} }
@ -1063,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
tr := io.TeeReader(cr, r.crc32) tr := io.TeeReader(cr, r.crc32)
b := make([]byte, 6) b := make([]byte, 6)
if n, err := tr.Read(b); err != nil { switch n, err := tr.Read(b); {
case err != nil:
return 0, 0, nil, err return 0, 0, nil, err
} else if n != 6 { case n != 6:
return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n) return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n)
} }
@ -1087,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
} }
buf := r.buf[:length] buf := r.buf[:length]
if n, err := tr.Read(buf); err != nil { switch n, err := tr.Read(buf); {
case err != nil:
return 0, 0, nil, err return 0, 0, nil, err
} else if n != length { case n != length:
return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n) return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n)
} }
if n, err := cr.Read(b[:4]); err != nil { switch n, err := cr.Read(b[:4]); {
case err != nil:
return 0, 0, nil, err return 0, 0, nil, err
} else if n != 4 { case n != 4:
return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n) return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n)
} }
if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp { if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp {
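
The WAL reader hunks all use a two-value init statement, `switch n, err := r.Read(b); {`, keeping both the byte count and the error scoped to the switch. Sketched against a `bytes.Reader`:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	r := bytes.NewReader([]byte{0xde, 0xad})
	b := make([]byte, 4)
	// n and err are visible only inside this switch.
	switch n, err := r.Read(b); {
	case err != nil:
		fmt.Println("read failed:", err)
	case n != len(b):
		fmt.Println("short read:", n, "bytes") // fires here: only 2 bytes available
	default:
		fmt.Println("read full header")
	}
}
```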

View file

@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool {
// we return EOF and the user can try again later. If we have a full // we return EOF and the user can try again later. If we have a full
// page, buildRecord is guaranteed to return a record or a non-EOF; it // page, buildRecord is guaranteed to return a record or a non-EOF; it
// has checks that the records fit in pages. // has checks that the records fit in pages.
if ok, err := r.buildRecord(); ok { switch ok, err := r.buildRecord(); {
case ok:
return true return true
} else if err != nil && err != io.EOF { case err != nil && err != io.EOF:
r.err = err r.err = err
return false return false
} }

View file

@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Ignore errors reading to end of segment whilst replaying the WAL. // Ignore errors reading to end of segment whilst replaying the WAL.
if !tail { if !tail {
if err != nil && errors.Cause(err) != io.EOF { switch {
case err != nil && errors.Cause(err) != io.EOF:
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
} else if reader.Offset() != size { case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
} }
return nil return nil
@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Ignore all errors reading to end of segment whilst replaying the WAL. // Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail { if !tail {
if err != nil && errors.Cause(err) != io.EOF { switch {
case err != nil && errors.Cause(err) != io.EOF:
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
} else if reader.Offset() != size { case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
} }
return nil return nil

View file

@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) {
node = childNode node = childNode
} }
err := tc.recursiveNodeUpdate(ev.Path, node) switch err := tc.recursiveNodeUpdate(ev.Path, node); {
if err != nil { case err != nil:
level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err) level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err)
failure() failure()
} else if tc.head.data == nil { case tc.head.data == nil:
level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix) level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix)
failure() failure()
} }
@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) {
func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error { func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error {
data, _, dataWatcher, err := tc.conn.GetW(path) data, _, dataWatcher, err := tc.conn.GetW(path)
if errors.Is(err, zk.ErrNoNode) { switch {
case errors.Is(err, zk.ErrNoNode):
tc.recursiveDelete(path, node) tc.recursiveDelete(path, node)
if node == tc.head { if node == tc.head {
return fmt.Errorf("path %s does not exist", path) return fmt.Errorf("path %s does not exist", path)
} }
return nil return nil
} else if err != nil { case err != nil:
return err return err
} }
@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
} }
children, _, childWatcher, err := tc.conn.ChildrenW(path) children, _, childWatcher, err := tc.conn.ChildrenW(path)
if errors.Is(err, zk.ErrNoNode) { switch {
case errors.Is(err, zk.ErrNoNode):
tc.recursiveDelete(path, node) tc.recursiveDelete(path, node)
return nil return nil
} else if err != nil { case err != nil:
return err return err
} }
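
`errors.Is` reads naturally as a case expression, and again the sentinel check has to precede the generic error case. A sketch with a local sentinel (no zk dependency):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoNode = errors.New("node does not exist")

func fetch(path string) ([]byte, error) {
	return nil, fmt.Errorf("get %q: %w", path, errNoNode) // wrapped sentinel
}

func main() {
	switch data, err := fetch("/config"); {
	case errors.Is(err, errNoNode): // unwraps to match the sentinel
		fmt.Println("path gone, cleaning up")
	case err != nil:
		fmt.Println("lookup failed:", err)
	default:
		fmt.Println("got", len(data), "bytes")
	}
}
```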

View file

@ -989,12 +989,14 @@ func (api *API) targets(r *http.Request) apiFuncResult {
ScrapeURL: target.URL().String(), ScrapeURL: target.URL().String(),
GlobalURL: globalURL.String(), GlobalURL: globalURL.String(),
LastError: func() string { LastError: func() string {
if err == nil && lastErrStr == "" { switch {
case err == nil && lastErrStr == "":
return "" return ""
} else if err != nil { case err != nil:
return errors.Wrapf(err, lastErrStr).Error() return errors.Wrapf(err, lastErrStr).Error()
} default:
return lastErrStr return lastErrStr
}
}(), }(),
LastScrape: target.LastScrape(), LastScrape: target.LastScrape(),
LastScrapeDuration: target.LastScrapeDuration().Seconds(), LastScrapeDuration: target.LastScrapeDuration().Seconds(),
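
Because `switch` is a statement, the handler above wraps it in an immediately invoked func literal to use it where an expression is required. The shape in isolation (struct and field names are stand-ins, not the v1 API):

```go
package main

import "fmt"

type status struct{ lastError string }

func newStatus(err error, lastErrStr string) status {
	return status{
		// A switch cannot appear inside a struct literal directly, so wrap
		// it in a func literal and call it immediately.
		lastError: func() string {
			switch {
			case err == nil && lastErrStr == "":
				return ""
			case err != nil:
				return err.Error() + ": " + lastErrStr
			default:
				return lastErrStr
			}
		}(),
	}
}

func main() {
	fmt.Printf("%+v\n", newStatus(nil, "previous scrape failed"))
	// {lastError:previous scrape failed}
}
```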

View file

@ -388,13 +388,13 @@ func TestFederationWithNativeHistograms(t *testing.T) {
break break
} }
require.NoError(t, err) require.NoError(t, err)
if et == textparse.EntryHelp {
metricFamilies++
}
if et == textparse.EntryHistogram || et == textparse.EntrySeries { if et == textparse.EntryHistogram || et == textparse.EntrySeries {
p.Metric(&l) p.Metric(&l)
} }
if et == textparse.EntryHistogram { switch et {
case textparse.EntryHelp:
metricFamilies++
case textparse.EntryHistogram:
_, parsedTimestamp, h, fh := p.Histogram() _, parsedTimestamp, h, fh := p.Histogram()
require.Nil(t, h) require.Nil(t, h)
actVec = append(actVec, promql.Sample{ actVec = append(actVec, promql.Sample{
@ -402,7 +402,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
H: fh, H: fh,
Metric: l, Metric: l,
}) })
} else if et == textparse.EntrySeries { case textparse.EntrySeries:
_, parsedTimestamp, f := p.Series() _, parsedTimestamp, f := p.Series()
actVec = append(actVec, promql.Sample{ actVec = append(actVec, promql.Sample{
T: *parsedTimestamp, T: *parsedTimestamp,