Merge branch 'main' into linter/nilerr

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Matthieu MOREL 2023-04-19 19:56:39 +02:00 committed by GitHub
commit bae9a21200
GPG key ID: 4AEE18F83AFDEB23
98 changed files with 452 additions and 376 deletions

View file

@ -12,6 +12,7 @@
// limitations under the License.
// The main package for the Prometheus server executable.
// nolint:revive // Many unused function arguments in this file by design.
package main
import (

View file

@ -72,9 +72,11 @@ Loop:
if !startedOk {
t.Fatal("prometheus didn't start in the specified timeout")
}
if err := prom.Process.Kill(); err == nil {
switch err := prom.Process.Kill(); {
case err == nil:
t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
// TODO: find a better way to detect when the process didn't exit as expected!
t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
}
}
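Most hunks in this merge apply the same mechanical transformation shown above: an if/else-if chain becomes a tagless switch, optionally keeping the init statement. A minimal sketch of the equivalence, with illustrative names not taken from the diff:

// Before: chained if/else-if with an init statement.
if err := doWork(); err == nil {
	onSuccess()
} else if isTimeout(err) {
	onTimeout(err)
}

// After: the same init statement on a tagless switch; cases are
// evaluated top to bottom, so the behavior is unchanged.
switch err := doWork(); {
case err == nil:
	onSuccess()
case isTimeout(err):
	onTimeout(err)
}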

View file

@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) {
})
}
func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
samples := []backfillSample{}
for ss.Next() {

View file

@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
}
// loadGroups parses groups from a list of recording rule files.
func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
if errs != nil {
return errs

View file

@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
samples model.Matrix
}
func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
return mockAPI.samples, v1.Warnings{}, nil
}
@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
}
}
func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
logger := log.NewNopLogger()
cfg := ruleImporterConfig{
outputDir: tmpDir,

View file

@ -403,14 +403,15 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
return nil, nil, err
}
var block tsdb.BlockReader
if blockID != "" {
switch {
case blockID != "":
for _, b := range blocks {
if b.Meta().ULID.String() == blockID {
block = b
break
}
}
} else if len(blocks) > 0 {
case len(blocks) > 0:
block = blocks[len(blocks)-1]
}
if block == nil {

View file

@ -434,7 +434,7 @@ func (tg *testGroup) maxEvalTime() time.Duration {
}
func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
q, err := engine.NewInstantQuery(qu, nil, qs, t)
q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t)
if err != nil {
return nil, err
}
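As the hunk above shows, Engine.NewInstantQuery (and NewRangeQuery below) now take a context.Context as their first argument; judging by the `_ context.Context` in the engine hunk further down, the parameter is accepted but not yet used in this commit. A hedged sketch of the new call shape, with an illustrative queryable and query string:

ctx := context.Background()
q, err := engine.NewInstantQuery(ctx, queryable, nil, "up == 0", time.Now())
if err != nil {
	return err
}
res := q.Exec(ctx) // reuse the same context for execution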

View file

@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
return d
}
func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
if d.ec2 != nil {
return d.ec2, nil
}
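Where a parameter is required by an interface or callback shape but unused in the body, the merge either drops the name entirely (as in ec2Client above) or binds it to the blank identifier. A small sketch with hypothetical types:

// Unnamed parameter: the signature still matches the interface.
func (c *client) fetch(context.Context) error { return nil }

// Blank identifier: useful when only some parameters are unused.
func newDiscovery(conf *Config, _ log.Logger) *Discovery {
	return &Discovery{port: conf.Port}
}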

View file

@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
for _, lname := range conf.NameList(name) {
response, err := lookupFromAnyServer(lname, qtype, conf, logger)
if err != nil {
switch {
case err != nil:
// We can't go home yet, because a later name
// may give us a valid, successful answer. However,
// we can no longer say "this name definitely doesn't
// exist", because we did not get that answer for
// at least one name.
allResponsesValid = false
} else if response.Rcode == dns.RcodeSuccess {
case response.Rcode == dns.RcodeSuccess:
// Outcome 1: GOLD!
return response, nil
}
}
if allResponsesValid {
// Outcome 2: everyone says NXDOMAIN, that's good enough for me
// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
return &dns.Msg{}, nil
}
// Outcome 3: boned.

View file

@ -59,7 +59,7 @@ type hcloudDiscovery struct {
}
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
d := &hcloudDiscovery{
port: conf.Port,
}

View file

@ -51,7 +51,7 @@ type robotDiscovery struct {
}
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
d := &robotDiscovery{
port: conf.Port,
endpoint: conf.robotEndpoint,
@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
return d, nil
}
func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
if err != nil {
return nil, err

View file

@ -60,7 +60,7 @@ type serverDiscovery struct {
datacenterID string
}
func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
d := &serverDiscovery{
port: conf.Port,
datacenterID: conf.DatacenterID,

View file

@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
)
}
func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
}
func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
}
@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
}
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
// Retries are not used so the metric is omitted.
return noopMetric{}
}

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many legitimately empty blocks in this file.
package kubernetes
import (

View file

@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
}
go func() {
for e.process(ctx, ch) {
for e.process(ctx, ch) { // nolint:revive
}
}()
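revive flags empty blocks, so loops whose only purpose is to keep calling a function (as above) or to drain a channel keep their intentionally empty bodies behind a nolint:revive directive. A minimal, self-contained sketch:

// Discard values until the channel is closed; the body is
// intentionally empty, so silence revive's empty-block check.
func drain(ch <-chan int) {
	for range ch { // nolint:revive
	}
}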

View file

@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
for i.process(ctx, ch) {
for i.process(ctx, ch) { // nolint:revive
}
}()

View file

@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
for n.process(ctx, ch) {
for n.process(ctx, ch) { // nolint:revive
}
}()

View file

@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
for p.process(ctx, ch) {
for p.process(ctx, ch) { // nolint:revive
}
}()

View file

@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
for s.process(ctx, ch) {
for s.process(ctx, ch) { // nolint:revive
}
}()

View file

@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
x,
got,
expected)
})
assertEqualGroups(t, got, tc.expectedTargets[x])
}
}
}
@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
}
}
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
t.Helper()
// Need to sort by the groups' source as the received order is not guaranteed.
@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
if _, ok := tgs[k]; !ok {
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
}
assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
})
assertEqualGroups(t, tgs[k], expected.tgs[k])
}
}
}

View file

@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
x,
got,
expected)
})
assertEqualGroups(t, got, tc.expectedTargets[x])
}
}
}
@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
}
}
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
t.Helper()
// Need to sort by the groups' source as the received order is not guaranteed.
@ -1129,7 +1124,7 @@ type lockStaticConfig struct {
}
func (s lockStaticConfig) Name() string { return "lockstatic" }
func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
return (lockStaticDiscoverer)(s), nil
}
@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
if _, ok := tgs[k]; !ok {
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
}
assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
})
assertEqualGroups(t, tgs[k], expected.tgs[k])
}
}
}
@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
// ApplyConfig happens at the same time as targets update.
func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger())

View file

@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
return nil, err
}
if len(conf.AuthToken) > 0 {
switch {
case len(conf.AuthToken) > 0:
rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
} else if len(conf.AuthTokenFile) > 0 {
case len(conf.AuthTokenFile) > 0:
rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
}
if err != nil {

View file

@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
return d, nil
}
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
opts := &nomad.QueryOptions{
AllowStale: d.allowStale,
}

View file

@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string {
return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
}
func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
client, err := createClient(d.config)
if err != nil {
return nil, err

View file

@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string {
return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
}
func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
client, err := createClient(d.config)
if err != nil {
return nil, err

View file

@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
for _, pathUpdate := range d.pathUpdates {
// Drain event channel in case the treecache leaks goroutines otherwise.
for range pathUpdate {
for range pathUpdate { // nolint:revive
}
}
d.conn.Close()

View file

@ -824,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into
origIdx += span.Offset
}
currIdx := i.targetIdx(origIdx)
if firstPass {
switch {
case firstPass:
i.currIdx = currIdx
firstPass = false
} else if currIdx != i.currIdx {
case currIdx != i.currIdx:
// Reached next bucket in targetSchema.
// Do not actually forward to the next bucket, but break out.
break mergeLoop

View file

@ -56,8 +56,14 @@ func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
func decodeSize(data string, index int) (int, int) {
var size int
for shift := uint(0); ; shift += 7 {
// Fast-path for common case of a single byte, value 0..127.
b := data[index]
index++
if b < 0x80 {
return int(b), index
}
size := int(b & 0x7F)
for shift := uint(7); ; shift += 7 {
// Just panic if we go off the end of data, since all Labels strings are constructed internally and
// malformed data indicates a bug or memory corruption.
b := data[index]
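The rewritten decodeSize above adds a fast path for the common case of a size that fits in a single byte (values 0..127), and only enters the 7-bit-shift loop for multi-byte varints. A minimal standalone sketch of the same unsigned-varint decode, with a hypothetical function name:

// decodeVarint returns the decoded value and the index just past it.
func decodeVarint(data string, index int) (int, int) {
	b := data[index]
	index++
	if b < 0x80 { // fast path: single byte, value 0..127
		return int(b), index
	}
	size := int(b & 0x7F)
	for shift := uint(7); ; shift += 7 {
		b := data[index]
		index++
		if b < 0x80 { // high bit clear: last byte
			return size | int(b)<<shift, index
		}
		size |= int(b&0x7F) << shift
	}
}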

View file

@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string {
return s
}
// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns if an exemplar exists.
func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
// Exemplar implements the Parser interface. However, since the classic
// Prometheus text format does not support exemplars, this implementation simply
// returns false and does nothing else.
func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
return false
}

View file

@ -27,7 +27,7 @@ import (
"github.com/prometheus/prometheus/util/teststorage"
)
func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error {
func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error {
metrics := []labels.Labels{}
metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
@ -240,16 +240,17 @@ func BenchmarkRangeQuery(b *testing.B) {
for _, c := range cases {
name := fmt.Sprintf("expr=%s,steps=%d", c.expr, c.steps)
b.Run(name, func(b *testing.B) {
ctx := context.Background()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
qry, err := engine.NewRangeQuery(
stor, nil, c.expr,
ctx, stor, nil, c.expr,
time.Unix(int64((numIntervals-c.steps)*10), 0),
time.Unix(int64(numIntervals*10), 0), time.Second*10)
if err != nil {
b.Fatal(err)
}
res := qry.Exec(context.Background())
res := qry.Exec(ctx)
if res.Err != nil {
b.Fatal(res.Err)
}

View file

@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
}
// NewInstantQuery returns an evaluation query for the given expression at the given time.
func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
expr, err := parser.ParseExpr(qs)
if err != nil {
return nil, err
@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs strin
// NewRangeQuery returns an evaluation query for the given time range and with
// the resolution set by the interval.
func (ng *Engine) NewRangeQuery(q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
expr, err := parser.ParseExpr(qs)
if err != nil {
return nil, err
@ -1956,7 +1956,7 @@ func (ev *evaluator) matrixIterSlice(
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; floats[drop].T < mint; drop++ {
for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive
}
ev.currentSamples -= drop
copy(floats, floats[drop:])
@ -1978,7 +1978,7 @@ func (ev *evaluator) matrixIterSlice(
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; histograms[drop].T < mint; drop++ {
for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive
}
ev.currentSamples -= drop
copy(histograms, histograms[drop:])
@ -2095,13 +2095,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching,
}
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
if matching.Card != parser.CardManyToMany {
switch {
case matching.Card != parser.CardManyToMany:
panic("set operations must only use many-to-many matching")
}
if len(lhs) == 0 { // Short-circuit.
case len(lhs) == 0: // Short-circuit.
enh.Out = append(enh.Out, rhs...)
return enh.Out
} else if len(rhs) == 0 {
case len(rhs) == 0:
enh.Out = append(enh.Out, lhs...)
return enh.Out
}
@ -2220,13 +2220,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
hl, hr = hr, hl
}
floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr)
if returnBool {
switch {
case returnBool:
if keep {
floatValue = 1.0
} else {
floatValue = 0.0
}
} else if !keep {
case !keep:
continue
}
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
@ -2530,9 +2531,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
mean: s.F,
groupCount: 1,
}
if s.H == nil {
switch {
case s.H == nil:
newAgg.hasFloat = true
} else if op == parser.SUM {
case op == parser.SUM:
newAgg.histogramValue = s.H.Copy()
newAgg.hasHistogram = true
}
@ -2542,9 +2544,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
inputVecLen := int64(len(vec))
resultSize := k
if k > inputVecLen {
switch {
case k > inputVecLen:
resultSize = inputVecLen
} else if k == 0 {
case k == 0:
resultSize = 1
}
switch op {
@ -2637,12 +2640,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.TOPK:
// We build a heap of up to k elements, with the smallest element at heap[0].
if int64(len(group.heap)) < k {
switch {
case int64(len(group.heap)) < k:
heap.Push(&group.heap, &Sample{
F: s.F,
Metric: s.Metric,
})
} else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) {
case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is bigger than the previous smallest element - overwrite that.
group.heap[0] = Sample{
F: s.F,
@ -2655,12 +2659,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.BOTTOMK:
// We build a heap of up to k elements, with the biggest element at heap[0].
if int64(len(group.reverseHeap)) < k {
switch {
case int64(len(group.reverseHeap)) < k:
heap.Push(&group.reverseHeap, &Sample{
F: s.F,
Metric: s.Metric,
})
} else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) {
case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)):
// This new element is smaller than the previous biggest element - overwrite that.
group.reverseHeap[0] = Sample{
F: s.F,
@ -2819,9 +2824,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
switch n := expr.(type) {
case *parser.VectorSelector:
if n.StartOrEnd == parser.START {
switch n.StartOrEnd {
case parser.START:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
} else if n.StartOrEnd == parser.END {
case parser.END:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
}
return n.Timestamp != nil
@ -2878,9 +2884,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
if isInvariant {
n.Expr = newStepInvariantExpr(n.Expr)
}
if n.StartOrEnd == parser.START {
switch n.StartOrEnd {
case parser.START:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
} else if n.StartOrEnd == parser.END {
case parser.END:
n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
}
return n.Timestamp != nil

View file

@ -231,14 +231,14 @@ func TestQueryError(t *testing.T) {
ctx, cancelCtx := context.WithCancel(context.Background())
defer cancelCtx()
vectorQuery, err := engine.NewInstantQuery(queryable, nil, "foo", time.Unix(1, 0))
vectorQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo", time.Unix(1, 0))
require.NoError(t, err)
res := vectorQuery.Exec(ctx)
require.Error(t, res.Err, "expected error on failed select but got none")
require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
matrixQuery, err := engine.NewInstantQuery(queryable, nil, "foo[1m]", time.Unix(1, 0))
matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0))
require.NoError(t, err)
res = matrixQuery.Exec(ctx)
@ -564,14 +564,15 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
query Query
err error
)
ctx := context.Background()
if tc.end == 0 {
query, err = engine.NewInstantQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start))
query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start))
} else {
query, err = engine.NewRangeQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second)
query, err = engine.NewRangeQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second)
}
require.NoError(t, err)
res := query.Exec(context.Background())
res := query.Exec(ctx)
require.NoError(t, res.Err)
require.Equal(t, tc.expected, hintsRecorder.hints)
@ -727,9 +728,9 @@ load 10s
var err error
var qry Query
if c.Interval == 0 {
qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.Query, c.Start)
qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
} else {
qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
}
require.NoError(t, err)
@ -1204,9 +1205,9 @@ load 10s
var err error
var qry Query
if c.Interval == 0 {
qry, err = engine.NewInstantQuery(test.Queryable(), opts, c.Query, c.Start)
qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start)
} else {
qry, err = engine.NewRangeQuery(test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval)
qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval)
}
require.NoError(t, err)
@ -1387,9 +1388,9 @@ load 10s
var err error
var qry Query
if c.Interval == 0 {
qry, err = engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start)
qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
} else {
qry, err = engine.NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
}
require.NoError(t, err)
@ -1628,9 +1629,9 @@ load 1ms
var err error
var qry Query
if c.end == 0 {
qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.query, start)
qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start)
} else {
qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.query, start, end, interval)
qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval)
}
require.NoError(t, err)
@ -1961,7 +1962,7 @@ func TestSubquerySelector(t *testing.T) {
engine := test.QueryEngine()
for _, c := range tst.cases {
t.Run(c.Query, func(t *testing.T) {
qry, err := engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start)
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start)
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -2909,6 +2910,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
}
func TestEngineOptsValidation(t *testing.T) {
ctx := context.Background()
cases := []struct {
opts EngineOpts
query string
@ -2968,8 +2970,8 @@ func TestEngineOptsValidation(t *testing.T) {
for _, c := range cases {
eng := NewEngine(c.opts)
_, err1 := eng.NewInstantQuery(nil, nil, c.query, time.Unix(10, 0))
_, err2 := eng.NewRangeQuery(nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second)
_, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0))
_, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second)
if c.fail {
require.Equal(t, c.expError, err1)
require.Equal(t, c.expError, err2)
@ -3116,7 +3118,7 @@ func TestRangeQuery(t *testing.T) {
err = test.Run()
require.NoError(t, err)
qry, err := test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval)
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -3147,7 +3149,7 @@ func TestNativeHistogramRate(t *testing.T) {
engine := test.QueryEngine()
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
@ -3191,7 +3193,7 @@ func TestNativeFloatHistogramRate(t *testing.T) {
engine := test.QueryEngine()
queryString := fmt.Sprintf("rate(%s[1m])", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond)))
require.NoError(t, err)
res := qry.Exec(test.Context())
require.NoError(t, res.Err)
@ -3255,7 +3257,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
require.NoError(t, app.Commit())
queryString := fmt.Sprintf("histogram_count(%s)", seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -3273,7 +3275,7 @@ func TestNativeHistogram_HistogramCountAndSum(t *testing.T) {
}
queryString = fmt.Sprintf("histogram_sum(%s)", seriesName)
qry, err = engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res = qry.Exec(test.Context())
@ -3509,7 +3511,7 @@ func TestNativeHistogram_HistogramQuantile(t *testing.T) {
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -3940,7 +3942,7 @@ func TestNativeHistogram_HistogramFraction(t *testing.T) {
for j, sc := range c.subCases {
t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -4077,7 +4079,7 @@ func TestNativeHistogram_Sum_Count_AddOperator(t *testing.T) {
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, exp Vector) {
qry, err := engine.NewInstantQuery(test.Queryable(), nil, queryString, timestamp.Time(ts))
qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(test.Context())
@ -4186,7 +4188,7 @@ metric 0 1 2
opts := &QueryOpts{
LookbackDelta: c.queryLookback,
}
qry, err := eng.NewInstantQuery(test.Queryable(), opts, query, c.ts)
qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts)
require.NoError(t, err)
res := qry.Exec(test.Context())

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package promql
import (
@ -803,12 +804,14 @@ func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) V
// === sgn(Vector parser.ValueTypeVector) Vector ===
func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
return simpleFunc(vals, enh, func(v float64) float64 {
if v < 0 {
switch {
case v < 0:
return -1
} else if v > 0 {
case v > 0:
return 1
default:
return v
}
return v
})
}

View file

@ -56,10 +56,11 @@ func TestDeriv(t *testing.T) {
require.NoError(t, a.Commit())
query, err := engine.NewInstantQuery(storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939))
ctx := context.Background()
query, err := engine.NewInstantQuery(ctx, storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939))
require.NoError(t, err)
result := query.Exec(context.Background())
result := query.Exec(ctx)
require.NoError(t, result.Err)
vec, _ := result.Vector()

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many legitimately empty blocks in this file.
package parser
import (
@ -293,7 +294,7 @@ func (l *Lexer) accept(valid string) bool {
// acceptRun consumes a run of runes from the valid set.
func (l *Lexer) acceptRun(valid string) {
for strings.ContainsRune(valid, l.next()) {
// consume
// Consume.
}
l.backup()
}
@ -346,9 +347,10 @@ func lexStatements(l *Lexer) stateFn {
switch r := l.next(); {
case r == eof:
if l.parenDepth != 0 {
switch {
case l.parenDepth != 0:
return l.errorf("unclosed left parenthesis")
} else if l.bracketOpen {
case l.bracketOpen:
return l.errorf("unclosed left bracket")
}
l.emit(EOF)
@ -370,12 +372,13 @@ func lexStatements(l *Lexer) stateFn {
case r == '^':
l.emit(POW)
case r == '=':
if t := l.peek(); t == '=' {
switch t := l.peek(); t {
case '=':
l.next()
l.emit(EQLC)
} else if t == '~' {
case '~':
return l.errorf("unexpected character after '=': %q", t)
} else {
default:
l.emit(EQL)
}
case r == '!':
@ -790,11 +793,12 @@ Loop:
default:
l.backup()
word := l.input[l.start:l.pos]
if kw, ok := key[strings.ToLower(word)]; ok {
switch kw, ok := key[strings.ToLower(word)]; {
case ok:
l.emit(kw)
} else if !strings.Contains(word, ":") {
case !strings.Contains(word, ":"):
l.emit(IDENTIFIER)
} else {
default:
l.emit(METRIC_IDENTIFIER)
}
break Loop

View file

@ -270,14 +270,15 @@ var errUnexpected = errors.New("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if _, ok := e.(runtime.Error); ok {
switch _, ok := e.(runtime.Error); {
case ok:
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else if e != nil {
case e != nil:
*errp = e.(error)
}
}
@ -332,7 +333,7 @@ func (p *parser) Lex(lval *yySymType) int {
// It is a no-op since the parsers error routines are triggered
// by mechanisms that allow more fine-grained control
// For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
func (p *parser) Error(e string) {
func (p *parser) Error(string) {
}
// InjectItem allows injecting a single Item at the beginning of the token stream
@ -481,9 +482,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
// This is made a function instead of a variable, so it is lazily evaluated on demand.
opRange := func() (r PositionRange) {
// Remove whitespace at the beginning and end of the range.
for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ {
for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive
}
for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- {
for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive
}
return
}
@ -518,13 +519,13 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types")
}
if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil {
switch {
case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil:
if len(n.VectorMatching.MatchingLabels) > 0 {
p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors")
}
n.VectorMatching = nil
// Both operands are Vectors.
} else if n.Op.IsSetOperator() {
case n.Op.IsSetOperator(): // Both operands are Vectors.
if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
}
@ -706,9 +707,10 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
}
// It is already ensured by the parseDuration func that there will never be a zero offset modifier.
if *orgoffsetp != 0 {
switch {
case *orgoffsetp != 0:
p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
} else if orgoffsetp != nil {
case orgoffsetp != nil:
*orgoffsetp = offset
}

View file

@ -124,9 +124,10 @@ func (node *MatrixSelector) String() string {
// Copy the Vector selector before changing the offset
vecSelector := *node.VectorSelector.(*VectorSelector)
offset := ""
if vecSelector.OriginalOffset > time.Duration(0) {
switch {
case vecSelector.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
} else if vecSelector.OriginalOffset < time.Duration(0) {
case vecSelector.OriginalOffset < time.Duration(0):
offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset))
}
at := ""
@ -163,9 +164,10 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
step = model.Duration(node.Step).String()
}
offset := ""
if node.OriginalOffset > time.Duration(0) {
switch {
case node.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
} else if node.OriginalOffset < time.Duration(0) {
case node.OriginalOffset < time.Duration(0):
offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
}
at := ""
@ -209,9 +211,10 @@ func (node *VectorSelector) String() string {
labelStrings = append(labelStrings, matcher.String())
}
offset := ""
if node.OriginalOffset > time.Duration(0) {
switch {
case node.OriginalOffset > time.Duration(0):
offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
} else if node.OriginalOffset < time.Duration(0) {
case node.OriginalOffset < time.Duration(0):
offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
}
at := ""

View file

@ -81,14 +81,15 @@ func TestConcurrentRangeQueries(t *testing.T) {
defer func() {
sem <- struct{}{}
}()
ctx := context.Background()
qry, err := engine.NewRangeQuery(
stor, nil, c.expr,
ctx, stor, nil, c.expr,
time.Unix(int64((numIntervals-c.steps)*10), 0),
time.Unix(int64(numIntervals*10), 0), time.Second*10)
if err != nil {
return err
}
res := qry.Exec(context.Background())
res := qry.Exec(ctx)
if res.Err != nil {
t.Logf("Query: %q, steps: %d, result: %s", c.expr, c.steps, res.Err)
return res.Err

View file

@ -169,11 +169,12 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
}
}
if bucket.Lower < 0 && bucket.Upper > 0 {
if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
switch {
case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
// The result is in the zero bucket and the histogram has only
// positive buckets. So we consider 0 to be the lower bound.
bucket.Lower = 0
} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
// The result is in the zero bucket and the histogram has only
// negative buckets. So we consider 0 to be the upper bound.
bucket.Upper = 0
@ -244,12 +245,13 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
for it.Next() {
b := it.At()
if b.Lower < 0 && b.Upper > 0 {
if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
switch {
case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
// This is the zero bucket and the histogram has only
// positive buckets. So we consider 0 to be the lower
// bound.
b.Lower = 0
} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
// This is in the zero bucket and the histogram has only
// negative buckets. So we consider 0 to be the upper
// bound.

View file

@ -538,7 +538,7 @@ func (t *Test) exec(tc testCommand) error {
}
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries {
q, err := t.QueryEngine().NewInstantQuery(t.storage, nil, iq.expr, iq.evalTime)
q, err := t.QueryEngine().NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return err
}
@ -560,7 +560,7 @@ func (t *Test) exec(tc testCommand) error {
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = t.queryEngine.NewRangeQuery(t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
q, err = t.queryEngine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return err
}

View file

@ -587,10 +587,10 @@ func TestAlertingRuleLimit(t *testing.T) {
evalTime := time.Unix(0, 0)
for _, test := range tests {
_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
if err != nil {
switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
case err != nil:
require.EqualError(t, err, test.err)
} else if test.err != "" {
case test.err != "":
t.Errorf("Expected errror %s, got none", test.err)
}
}

View file

@ -189,7 +189,7 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector,
// It converts scalar into vector results.
func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
q, err := engine.NewInstantQuery(q, nil, qs, t)
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
if err != nil {
return nil, err
}

View file

@ -780,13 +780,13 @@ func TestUpdate(t *testing.T) {
rgs.Groups[i].Interval = model.Duration(10)
}
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
// Update limit and reload.
for i := range rgs.Groups {
rgs.Groups[i].Limit = 1
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
// Change group rules and reload.
for i, g := range rgs.Groups {
@ -794,7 +794,7 @@ func TestUpdate(t *testing.T) {
rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value))
}
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
}
// ruleGroupsTest for running tests over rules.
@ -837,7 +837,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
}
}
func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) {
bs, err := yaml.Marshal(formatRules(rgs))
require.NoError(t, err)
tmpFile.Seek(0, 0)

View file

@ -30,19 +30,19 @@ type unknownRule struct{}
func (u unknownRule) Name() string { return "" }
func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) {
func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
return nil, nil
}
func (u unknownRule) String() string { return "" }
func (u unknownRule) Query() parser.Expr { return nil }
func (u unknownRule) SetLastError(err error) {}
func (u unknownRule) LastError() error { return nil }
func (u unknownRule) SetHealth(health RuleHealth) {}
func (u unknownRule) Health() RuleHealth { return "" }
func (u unknownRule) SetEvaluationDuration(duration time.Duration) {}
func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
func (u unknownRule) SetEvaluationTimestamp(time time.Time) {}
func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }
func (u unknownRule) String() string { return "" }
func (u unknownRule) Query() parser.Expr { return nil }
func (u unknownRule) SetLastError(error) {}
func (u unknownRule) LastError() error { return nil }
func (u unknownRule) SetHealth(RuleHealth) {}
func (u unknownRule) Health() RuleHealth { return "" }
func (u unknownRule) SetEvaluationDuration(time.Duration) {}
func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
func (u unknownRule) SetEvaluationTimestamp(time.Time) {}
func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }
func TestNewRuleDetailPanics(t *testing.T) {
require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {

View file

@ -223,10 +223,10 @@ func TestRecordingRuleLimit(t *testing.T) {
evalTime := time.Unix(0, 0)
for _, test := range tests {
_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
if err != nil {
switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
case err != nil:
require.EqualError(t, err, test.err)
} else if test.err != "" {
case test.err != "":
t.Errorf("Expected error %s, got none", test.err)
}
}

View file

@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
// Cleanup and reload pool if the configuration has changed.
var failed bool
for name, sp := range m.scrapePools {
if cfg, ok := m.scrapeConfigs[name]; !ok {
switch cfg, ok := m.scrapeConfigs[name]; {
case !ok:
sp.stop()
delete(m.scrapePools, name)
} else if !reflect.DeepEqual(sp.config, cfg) {
case !reflect.DeepEqual(sp.config, cfg):
err := sp.reload(cfg)
if err != nil {
level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name)

View file

@ -503,9 +503,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
nonEmpty := false
t.LabelsRange(func(l labels.Label) { nonEmpty = true })
if nonEmpty {
switch {
case nonEmpty:
all = append(all, t)
} else if !t.discoveredLabels.IsEmpty() {
case !t.discoveredLabels.IsEmpty():
sp.droppedTargets = append(sp.droppedTargets, t)
}
}
@ -946,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) {
count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
c.metaMtx.Unlock()
if flushCache {
switch {
case flushCache:
c.successfulCount = count
} else if count > c.successfulCount*2+1000 {
case count > c.successfulCount*2+1000:
// If a target had varying labels in scrapes that ultimately failed,
// the caches would grow indefinitely. Force a flush when this happens.
// We use the heuristic that this is a doubling of the cache size

View file

@ -724,9 +724,10 @@ func TestScrapeLoopStop(t *testing.T) {
// All samples in a scrape must have the same timestamp.
var ts int64
for i, s := range appender.result {
if i%6 == 0 {
switch {
case i%6 == 0:
ts = s.t
} else if s.t != ts {
case s.t != ts:
t.Fatalf("Unexpected multiple timestamps within single scrape")
}
}
@ -1139,10 +1140,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
if numScrapes == 1 {
switch numScrapes {
case 1:
w.Write([]byte("metric_a 42\n"))
return nil
} else if numScrapes == 5 {
case 5:
cancel()
}
return errors.New("scrape failed")
@ -1199,14 +1201,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
// Succeed once, several failures, then stop.
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
switch {
case numScrapes == 1:
switch numScrapes {
case 1:
w.Write([]byte("metric_a 42\n"))
return nil
case numScrapes == 2:
case 2:
w.Write([]byte("7&-\n"))
return nil
case numScrapes == 3:
case 3:
cancel()
}
return errors.New("scrape failed")
@ -1265,14 +1267,15 @@ func TestScrapeLoopCache(t *testing.T) {
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
if numScrapes == 1 || numScrapes == 2 {
switch numScrapes {
case 1, 2:
if _, ok := sl.cache.series["metric_a"]; !ok {
t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
}
if _, ok := sl.cache.series["metric_b"]; !ok {
t.Errorf("metric_b missing from cache after scrape %d", numScrapes)
}
} else if numScrapes == 3 {
case 3:
if _, ok := sl.cache.series["metric_a"]; !ok {
t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
}
@ -1282,14 +1285,14 @@ func TestScrapeLoopCache(t *testing.T) {
}
numScrapes++
switch {
case numScrapes == 1:
switch numScrapes {
case 1:
w.Write([]byte("metric_a 42\nmetric_b 43\n"))
return nil
case numScrapes == 3:
case 3:
w.Write([]byte("metric_a 44\n"))
return nil
case numScrapes == 4:
case 4:
cancel()
}
return fmt.Errorf("scrape failed")
@ -2406,7 +2409,7 @@ type testScraper struct {
scrapeFunc func(context.Context, io.Writer) error
}
func (ts *testScraper) offset(interval time.Duration, jitterSeed uint64) time.Duration {
func (ts *testScraper) offset(time.Duration, uint64) time.Duration {
return ts.offsetDur
}
@ -2868,7 +2871,7 @@ func TestScrapeAddFast(t *testing.T) {
require.NoError(t, slApp.Commit())
}
func TestReuseCacheRace(t *testing.T) {
func TestReuseCacheRace(*testing.T) {
var (
app = &nopAppendable{}
cfg = &config.ScrapeConfig{

View file

@ -134,7 +134,7 @@ func TestTargetURL(t *testing.T) {
require.Equal(t, expectedURL, target.URL())
}
func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target {
func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Target {
lb := labels.NewBuilder(lbls)
lb.Set(model.SchemeLabel, "http")
lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))

View file

@ -188,8 +188,8 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for it.Next() != chunkenc.ValNone {
// scan everything
for it.Next() != chunkenc.ValNone { // nolint:revive
// Scan everything.
}
require.NoError(b, it.Err())
}

View file

@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) {
for _, appender := range f.secondaries {
rollbackErr := appender.Rollback()
if err == nil {
switch {
case err == nil:
err = rollbackErr
} else if rollbackErr != nil {
case rollbackErr != nil:
level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
}
}

View file

@ -233,7 +233,7 @@ func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage
return storage.ErrSeriesSet(errSelect)
}
func (errQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
return nil, nil, errors.New("label values error")
}

View file

@ -99,7 +99,7 @@ type MockQueryable struct {
MockQuerier Querier
}
func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) {
return q.MockQuerier, nil
}
@ -118,11 +118,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}
func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) {
func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) {
return nil, nil, nil
}
func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) {
func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) {
return nil, nil, nil
}

View file

@ -82,8 +82,8 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for it.Next() != chunkenc.ValNone {
// scan everything
for it.Next() != chunkenc.ValNone { // nolint:revive
// Scan everything.
}
require.NoError(b, it.Err())
}

View file

@ -723,12 +723,11 @@ func (c *compactChunkIterator) Next() bool {
break
}
if next.MinTime == prev.MinTime &&
next.MaxTime == prev.MaxTime &&
bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
// 1:1 duplicates, skip it.
} else {
// We operate on same series, so labels does not matter here.
// Only do something if it is not a perfect duplicate.
if next.MinTime != prev.MinTime ||
next.MaxTime != prev.MaxTime ||
!bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
// We operate on same series, so labels do not matter here.
overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
if next.MaxTime > oMaxTime {
oMaxTime = next.MaxTime
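The compactChunkIterator hunk above replaces an intentionally empty "1:1 duplicate, skip it" branch with the negated condition, so only the meaningful branch remains. A sketch of the inversion, with illustrative names:

// Before: the empty branch exists only to carry the comment.
if next.Equal(prev) {
	// 1:1 duplicate, skip it.
} else {
	keep(next)
}

// After: negate the condition and drop the empty branch.
if !next.Equal(prev) {
	keep(next)
}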

View file

@ -524,7 +524,7 @@ func TestDecodeWriteRequest(t *testing.T) {
require.Equal(t, writeRequestFixture, actual)
}
func TestNilHistogramProto(t *testing.T) {
func TestNilHistogramProto(*testing.T) {
// This function will panic if it improperly handles nil
// values, causing the test to fail.
HistogramProtoToHistogram(prompb.Histogram{})

View file

@ -55,9 +55,10 @@ func (r *ewmaRate) tick() {
r.mutex.Lock()
defer r.mutex.Unlock()
if r.init {
switch {
case r.init:
r.lastRate += r.alpha * (instantRate - r.lastRate)
} else if newEvents > 0 {
case newEvents > 0:
r.init = true
r.lastRate = instantRate
}

View file

@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int {
return t.numShards
}
if numShards > t.cfg.MaxShards {
switch {
case numShards > t.cfg.MaxShards:
numShards = t.cfg.MaxShards
} else if numShards < t.cfg.MinShards {
case numShards < t.cfg.MinShards:
numShards = t.cfg.MinShards
}
return numShards
@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
}
sleepDuration = backoff
if backoffErr.retryAfter > 0 {
switch {
case backoffErr.retryAfter > 0:
sleepDuration = backoffErr.retryAfter
level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration)
} else if backoffErr.retryAfter < 0 {
case backoffErr.retryAfter < 0:
level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism")
}

View file

@ -362,7 +362,7 @@ func TestReshard(t *testing.T) {
c.waitForExpectedData(t)
}
func TestReshardRaceWithStop(t *testing.T) {
func TestReshardRaceWithStop(*testing.T) {
c := NewTestWriteClient()
var m *QueueManager
h := sync.Mutex{}
@ -864,10 +864,10 @@ func (c *TestBlockingWriteClient) Endpoint() string {
// For benchmarking the send and not the receive side.
type NopWriteClient struct{}
func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil }
func (c *NopWriteClient) Name() string { return "nopwriteclient" }
func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
func (c *NopWriteClient) Store(context.Context, []byte) error { return nil }
func (c *NopWriteClient) Name() string { return "nopwriteclient" }
func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
func BenchmarkSampleSend(b *testing.B) {
// Send one sample per series, which is the typical remote_write case

View file

@ -294,7 +294,7 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e
return 0, nil
}
func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t < m.latestHistogram {
return 0, storage.ErrOutOfOrderSample
}

View file

@ -732,22 +732,22 @@ func (db *DB) StartTime() (int64, error) {
}
// Querier implements the Storage interface.
func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) {
return nil, ErrUnsupported
}
// ChunkQuerier implements the Storage interface.
func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
return nil, ErrUnsupported
}
// ExemplarQuerier implements the Storage interface.
func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
return nil, ErrUnsupported
}
// Appender implements storage.Storage.
func (db *DB) Appender(_ context.Context) storage.Appender {
func (db *DB) Appender(context.Context) storage.Appender {
return db.appenderPool.Get().(storage.Appender)
}
@ -823,7 +823,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
return 0, storage.ErrOutOfOrderSample
}
// NOTE: always modify pendingSamples and sampleSeries together
// NOTE: always modify pendingSamples and sampleSeries together.
a.pendingSamples = append(a.pendingSamples, record.RefSample{
Ref: series.ref,
T: t,
@ -849,8 +849,8 @@ func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool
return series, true
}
func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
// series references and chunk references are identical for agent mode.
func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
// Series references and chunk references are identical for agent mode.
headRef := chunks.HeadSeriesRef(ref)
s := a.series.GetByID(headRef)
@ -951,7 +951,8 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
return 0, storage.ErrOutOfOrderSample
}
if h != nil {
switch {
case h != nil:
// NOTE: always modify pendingHistograms and histogramSeries together
a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
Ref: series.ref,
@ -959,7 +960,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
H: h,
})
a.histogramSeries = append(a.histogramSeries, series)
} else if fh != nil {
case fh != nil:
// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
Ref: series.ref,
@ -973,7 +974,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
return storage.SeriesRef(series.ref), nil
}
func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Wire metadata in the Agent's appender.
return 0, nil
}

View file

@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
// To get an appender, we must know the state it would have if we had
// appended all existing data from scratch. We iterate through the end
// and populate via the iterator's state.
for it.Next() == ValFloatHistogram {
for it.Next() == ValFloatHistogram { // nolint:revive
}
if err := it.Err(); err != nil {
return nil, err
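
The intentionally empty loops above fast-forward an iterator purely for its side effects, which revive's empty-block rule would otherwise flag. A toy sketch of the same idiom (the iter type is invented for illustration):

package main

import "fmt"

// iter is a toy cursor; Next advances and reports whether a value remains.
type iter struct{ i, n int }

func (it *iter) Next() bool { it.i++; return it.i <= it.n }
func (it *iter) At() int    { return it.i }

func main() {
	it := &iter{n: 3}
	// Fast-forward to the end purely for the iterator's side effects,
	// mirroring the empty chunk-appender loops above.
	for it.Next() { // nolint:revive
	}
	fmt.Println("final position:", it.At()) // final position: 4
}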

View file

@ -111,7 +111,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {
// 3. Now recycle an iterator that was never used to access anything.
itX := c.Iterator(nil)
for itX.Next() == ValFloatHistogram {
for itX.Next() == ValFloatHistogram { // nolint:revive
// Just iterate through without accessing anything.
}
it3 := c.iterator(itX)

View file

@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
// To get an appender, we must know the state it would have if we had
// appended all existing data from scratch. We iterate through the end
// and populate via the iterator's state.
for it.Next() == ValHistogram {
for it.Next() == ValHistogram { // nolint:revive
}
if err := it.Err(); err != nil {
return nil, err

View file

@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {
// 3. Now recycle an iterator that was never used to access anything.
itX := c.Iterator(nil)
for itX.Next() == ValHistogram {
for itX.Next() == ValHistogram { // nolint:revive
// Just iterate through without accessing anything.
}
it3 := c.iterator(itX)

View file

@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
// To get an appender we must know the state it would have if we had
// appended all existing data from scratch.
// We iterate through the end and populate via the iterator's state.
for it.Next() != ValNone {
for it.Next() != ValNone { // nolint:revive
}
if err := it.Err(); err != nil {
return nil, err
@ -152,26 +152,25 @@ type xorAppender struct {
trailing uint8
}
func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) {
panic("appended a histogram to an xor chunk")
}
func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
panic("appended a float histogram to an xor chunk")
}
func (a *xorAppender) Append(t int64, v float64) {
var tDelta uint64
num := binary.BigEndian.Uint16(a.b.bytes())
switch {
case num == 0:
switch num {
case 0:
buf := make([]byte, binary.MaxVarintLen64)
for _, b := range buf[:binary.PutVarint(buf, t)] {
a.b.writeByte(b)
}
a.b.writeBits(math.Float64bits(v), 64)
case num == 1:
case 1:
tDelta = uint64(t - a.t)
buf := make([]byte, binary.MaxVarintLen64)
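
When every branch compares the same variable for equality, the tagless switch collapses further into a tagged one, as above. A hypothetical sketch:

package main

import "fmt"

func describe(num uint16) string {
	// Equality tests against one variable collapse into a tagged switch.
	switch num {
	case 0:
		return "first sample"
	case 1:
		return "second sample"
	default:
		return "delta-of-delta encoded"
	}
}

func main() {
	fmt.Println(describe(0), describe(1), describe(7))
}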

View file

@ -999,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error {
cdm.readPathMtx.RLock()
lastSeq := 0
for seg := range cdm.mmappedChunkFiles {
if seg >= cerr.FileIndex {
switch {
case seg >= cerr.FileIndex:
segs = append(segs, seg)
} else if seg > lastSeq {
case seg > lastSeq:
lastSeq = seg
}
}

View file

@ -392,10 +392,10 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
// Compact creates a new block in the compactor's directory from the blocks in the
// provided directories.
func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) {
return c.CompactWithPopulateBlockFunc(dest, dirs, open, DefaultPopulateBlockFunc{})
return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
}
func (c *LeveledCompactor) CompactWithPopulateBlockFunc(dest string, dirs []string, open []*Block, populateBlockFunc PopulateBlockFunc) (uid ulid.ULID, err error) {
func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) {
var (
blocks []BlockReader
bs []*Block
@ -439,7 +439,7 @@ func (c *LeveledCompactor) CompactWithPopulateBlockFunc(dest string, dirs []stri
uid = ulid.MustNew(ulid.Now(), rand.Reader)
meta := CompactBlockMetas(uid, metas...)
err = c.write(dest, meta, populateBlockFunc, blocks...)
err = c.write(dest, meta, blockPopulator, blocks...)
if err == nil {
if meta.Stats.NumSamples == 0 {
for _, b := range bs {
@ -505,7 +505,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p
}
}
err := c.write(dest, meta, DefaultPopulateBlockFunc{}, b)
err := c.write(dest, meta, DefaultBlockPopulator{}, b)
if err != nil {
return uid, err
}
@ -550,7 +550,7 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error {
}
// write creates a new block that is the union of the provided blocks into dir.
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc PopulateBlockFunc, blocks ...BlockReader) (err error) {
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator BlockPopulator, blocks ...BlockReader) (err error) {
dir := filepath.Join(dest, meta.ULID.String())
tmp := dir + tmpForCreationBlockDirSuffix
var closers []io.Closer
@ -598,7 +598,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc
}
closers = append(closers, indexw)
if err := populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil {
if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil {
return errors.Wrap(err, "populate block")
}
@ -663,16 +663,16 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, populateBlockFunc
return nil
}
type PopulateBlockFunc interface {
type BlockPopulator interface {
PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error
}
type DefaultPopulateBlockFunc struct{}
type DefaultBlockPopulator struct{}
// PopulateBlock fills the index and chunk writers with new data gathered as the union
// of the provided blocks. It returns meta information for the new block.
// It expects sorted blocks input by mint.
func (c DefaultPopulateBlockFunc) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) {
if len(blocks) == 0 {
return errors.New("cannot populate block from no readers")
}
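
The rename to BlockPopulator follows Go's convention of naming a single-method interface after what it does, and it makes the populator pluggable via CompactWithBlockPopulator. A reduced, hypothetical analogue of that shape (the real interface takes TSDB-specific readers, writers, and metrics):

package main

import (
	"context"
	"fmt"
)

// Populator is a reduced analogue of tsdb.BlockPopulator: a single-method
// interface with a default implementation struct.
type Populator interface {
	Populate(ctx context.Context, dst *[]string, srcs ...string) error
}

type DefaultPopulator struct{}

func (DefaultPopulator) Populate(_ context.Context, dst *[]string, srcs ...string) error {
	*dst = append(*dst, srcs...) // union of the inputs, like block compaction
	return nil
}

// compact accepts any Populator, mirroring CompactWithBlockPopulator.
func compact(p Populator, srcs ...string) ([]string, error) {
	var out []string
	err := p.Populate(context.Background(), &out, srcs...)
	return out, err
}

func main() {
	out, _ := compact(DefaultPopulator{}, "block-a", "block-b")
	fmt.Println(out)
}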

View file

@ -441,7 +441,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
tmpdir := t.TempDir()
require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultPopulateBlockFunc{}, erringBReader{}))
require.Error(t, compactor.write(tmpdir, &BlockMeta{}, DefaultBlockPopulator{}, erringBReader{}))
_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix)
require.True(t, os.IsNotExist(err), "directory is not cleaned up")
}
@ -467,8 +467,8 @@ func (erringBReader) Size() int64 { return 0 }
type nopChunkWriter struct{}
func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil }
func (nopChunkWriter) Close() error { return nil }
func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil }
func (nopChunkWriter) Close() error { return nil }
func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) {
var curr []sample
@ -953,8 +953,8 @@ func TestCompaction_populateBlock(t *testing.T) {
}
iw := &mockIndexWriter{}
populateBlockFunc := DefaultPopulateBlockFunc{}
err = populateBlockFunc.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{})
blockPopulator := DefaultBlockPopulator{}
err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{})
if tc.expErr != nil {
require.Error(t, err)
require.Equal(t, tc.expErr.Error(), err.Error())

View file

@ -963,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
// Create WBL if it was not present and if OOO is enabled with WAL enabled.
var wblog *wlog.WL
var err error
if db.head.wbl != nil {
switch {
case db.head.wbl != nil:
// The existing WBL from the disk might have been replayed while OOO was disabled.
wblog = db.head.wbl
} else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 {
case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0:
segmentSize := wlog.DefaultSegmentSize
// Wal is set to a custom size.
if db.opts.WALSegmentSize > 0 {
@ -1532,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
}
toDelete := filepath.Join(db.dir, ulid.String())
if _, err := os.Stat(toDelete); os.IsNotExist(err) {
switch _, err := os.Stat(toDelete); {
case os.IsNotExist(err):
// Noop.
continue
} else if err != nil {
case err != nil:
return errors.Wrapf(err, "stat dir %v", toDelete)
}

View file

@ -1426,11 +1426,11 @@ type mockCompactorFailing struct {
max int
}
func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
func (*mockCompactorFailing) Plan(string) ([]string, error) {
return nil, nil
}
func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) {
if len(c.blocks) >= c.max {
return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
}
@ -1458,7 +1458,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, err
return ulid.ULID{}, nil
}
func (*mockCompactorFailing) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) {
func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
return nil, fmt.Errorf("mock compaction failing CompactOOO")
}

View file

@ -24,4 +24,4 @@ import (
//
// The blank import above is actually what invokes the test of this package. If
// the import succeeds (the code compiles), the test passes.
func Test(t *testing.T) {}
func Test(*testing.T) {}

View file

@ -574,7 +574,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
func (h *Head) Init(minValidTime int64) error {
h.minValidTime.Store(minValidTime)
defer func() {
h.postings.EnsureOrder()
h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
}()
defer h.gc() // After loading the wal remove the obsolete data from the head.
defer func() {

View file

@ -344,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
}
if value.IsStaleNaN(v) {
if s.lastHistogramValue != nil {
switch {
case s.lastHistogramValue != nil:
return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
} else if s.lastFloatHistogramValue != nil {
case s.lastFloatHistogramValue != nil:
return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
}
}
@ -552,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return 0, err
}
if created {
if h != nil {
switch {
case h != nil:
s.lastHistogramValue = &histogram.Histogram{}
} else if fh != nil {
case fh != nil:
s.lastFloatHistogramValue = &histogram.FloatHistogram{}
}
a.series = append(a.series, record.RefSeries{
@ -564,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}
}
if h != nil {
switch {
case h != nil:
s.Lock()
if err := s.appendableHistogram(t, h); err != nil {
s.Unlock()
@ -581,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
H: h,
})
a.histogramSeries = append(a.histogramSeries, s)
} else if fh != nil {
case fh != nil:
s.Lock()
if err := s.appendableFloatHistogram(t, fh); err != nil {
s.Unlock()
@ -938,7 +941,10 @@ func (a *headAppender) Commit() (err error) {
var ok, chunkCreated bool
if err == nil && oooSample {
switch {
case err != nil:
// Do nothing here.
case oooSample:
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRef chunks.ChunkDiskMapperRef
@ -976,7 +982,7 @@ func (a *headAppender) Commit() (err error) {
// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
samplesAppended--
}
} else if err == nil {
default:
ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange)
if ok {
if s.T < inOrderMint {
@ -1177,14 +1183,15 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
}
// We have 3 cases here
// - !okToAppend -> We need to cut a new chunk.
// - !okToAppend or counterReset → We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset {
switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.
@ -1270,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
}
// We have 3 cases here
// - !okToAppend -> We need to cut a new chunk.
// - !okToAppend or counterReset → We need to cut a new chunk.
// - okToAppend but we have inserts → Existing chunk needs
// recoding before we can append our histogram.
// - okToAppend and no inserts → Chunk is ready to support our histogram.
if !okToAppend || counterReset {
switch {
case !okToAppend || counterReset:
c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
chunkCreated = true
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
// New buckets have appeared. We need to recode all
// prior histogram samples within the chunk before we
// can process this one.

View file

@ -424,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
break
}
if chunkRef == meta.OOOLastRef {
switch {
case chunkRef == meta.OOOLastRef:
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
MinTime: meta.OOOLastMinTime,
@ -435,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
origMinT: c.minTime,
origMaxT: c.maxTime,
})
} else if c.OverlapsClosedInterval(mint, maxt) {
case c.OverlapsClosedInterval(mint, maxt):
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
MinTime: c.minTime,
@ -594,12 +595,14 @@ type boundedIterator struct {
func (b boundedIterator) Next() chunkenc.ValueType {
for b.Iterator.Next() == chunkenc.ValFloat {
t, _ := b.Iterator.At()
if t < b.minT {
switch {
case t < b.minT:
continue
} else if t > b.maxT {
case t > b.maxT:
return chunkenc.ValNone
default:
return chunkenc.ValFloat
}
return chunkenc.ValFloat
}
return chunkenc.ValNone
}

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many legitimately empty blocks in this file.
package tsdb
import (
@ -103,7 +104,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
b.Cleanup(func() { require.NoError(b, h.Close()) })
ts := int64(1000)
append := func() error {
appendSamples := func() error {
var err error
app := h.Appender(context.Background())
for _, s := range series[:seriesCount] {
@ -120,13 +121,13 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
}
// Init series, that's not what we're benchmarking here.
require.NoError(b, append())
require.NoError(b, appendSamples())
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, append())
require.NoError(b, appendSamples())
}
})
}
@ -2959,10 +2960,11 @@ func TestAppendHistogram(t *testing.T) {
actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms))
actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms))
for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
if typ == chunkenc.ValHistogram {
switch typ {
case chunkenc.ValHistogram:
ts, h := it.AtHistogram()
actHistograms = append(actHistograms, sample{t: ts, h: h})
} else if typ == chunkenc.ValFloatHistogram {
case chunkenc.ValFloatHistogram:
ts, fh := it.AtFloatHistogram()
actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
}
@ -3564,14 +3566,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
for i, eh := range expHistograms {
ah := actHistograms[i]
if floatHistogram {
if value.IsStaleNaN(eh.fh.Sum) {
switch {
case value.IsStaleNaN(eh.fh.Sum):
actNumStale++
require.True(t, value.IsStaleNaN(ah.fh.Sum))
// To make require.Equal work.
ah.fh.Sum = 0
eh.fh = eh.fh.Copy()
eh.fh.Sum = 0
} else if i > 0 {
case i > 0:
prev := expHistograms[i-1]
if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
eh.fh.CounterResetHint = histogram.UnknownCounterReset
@ -3579,14 +3582,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
}
require.Equal(t, eh, ah)
} else {
if value.IsStaleNaN(eh.h.Sum) {
switch {
case value.IsStaleNaN(eh.h.Sum):
actNumStale++
require.True(t, value.IsStaleNaN(ah.h.Sum))
// To make require.Equal work.
ah.h.Sum = 0
eh.h = eh.h.Copy()
eh.h.Sum = 0
} else if i > 0 {
case i > 0:
prev := expHistograms[i-1]
if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
eh.h.CounterResetHint = histogram.UnknownCounterReset
@ -4487,15 +4491,13 @@ func TestHistogramValidation(t *testing.T) {
for testName, tc := range tests {
t.Run(testName, func(t *testing.T) {
err := ValidateHistogram(tc.h)
if tc.errMsg != "" {
switch err := ValidateHistogram(tc.h); {
case tc.errMsg != "":
require.ErrorContains(t, err, tc.errMsg)
} else {
default:
require.NoError(t, err)
}
err = ValidateFloatHistogram(tc.h.ToFloat())
switch {
switch err := ValidateFloatHistogram(tc.h.ToFloat()); {
case tc.errMsgFloat != "":
require.ErrorContains(t, err, tc.errMsgFloat)
case tc.errMsg != "":
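
Folding the assignment into the switch header, as above, scopes the error to the statement that inspects it. A minimal sketch with an invented validate helper:

package main

import (
	"errors"
	"fmt"
)

var errNegative = errors.New("value is negative")

func validate(v int) error {
	if v < 0 {
		return errNegative
	}
	return nil
}

func main() {
	for _, v := range []int{3, -1} {
		// The init statement scopes err to the switch, so each check
		// gets a fresh, clearly-bounded error variable.
		switch err := validate(v); {
		case err != nil:
			fmt.Println("rejected:", err)
		default:
			fmt.Println("accepted:", v)
		}
	}
}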

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many legitimately empty blocks in this file.
package tsdb
import (

View file

@ -224,7 +224,10 @@ func (p *MemPostings) All() Postings {
// EnsureOrder ensures that all postings lists are sorted. After it returns, all further
// calls to add and addFor will insert new IDs in a sorted manner.
func (p *MemPostings) EnsureOrder() {
// Parameter numberOfConcurrentProcesses is used to specify the maximum number of
// CPU cores used for this operation. If it is <= 0, GOMAXPROCS is used.
// GOMAXPROCS was the default before introducing this parameter.
func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) {
p.mtx.Lock()
defer p.mtx.Unlock()
@ -232,13 +235,16 @@ func (p *MemPostings) EnsureOrder() {
return
}
n := runtime.GOMAXPROCS(0)
concurrency := numberOfConcurrentProcesses
if concurrency <= 0 {
concurrency = runtime.GOMAXPROCS(0)
}
workc := make(chan *[][]storage.SeriesRef)
var wg sync.WaitGroup
wg.Add(n)
wg.Add(concurrency)
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
go func() {
for job := range workc {
for _, l := range *job {
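
The new parameter caps the number of worker goroutines, and a non-positive value falls back to GOMAXPROCS, so callers that pass 0 keep the previous behavior. A reduced sketch of the fallback plus the fan-out pattern (simplified; the real workers sort postings lists):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// resolveConcurrency mirrors the fallback above: a non-positive request
// means "use GOMAXPROCS", which was the previous hard-coded behavior.
func resolveConcurrency(requested int) int {
	if requested <= 0 {
		return runtime.GOMAXPROCS(0)
	}
	return requested
}

func main() {
	concurrency := resolveConcurrency(0) // 0 => GOMAXPROCS, as in EnsureOrder(0)
	jobs := make(chan int)
	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			defer wg.Done()
			for j := range jobs {
				_ = j // sort one postings list here in the real code
			}
		}()
	}
	for j := 0; j < 100; j++ {
		jobs <- j
	}
	close(jobs)
	wg.Wait()
	fmt.Println("sorted with", concurrency, "workers")
}
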
@ -559,9 +565,10 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
for _, it := range p {
// NOTE: mergedPostings struct requires the user to issue an initial Next.
if it.Next() {
switch {
case it.Next():
ph = append(ph, it)
} else if it.Err() != nil {
case it.Err() != nil:
return &mergedPostings{err: it.Err()}, true
}
}
@ -695,9 +702,7 @@ func (rp *removedPostings) Next() bool {
rp.fok = rp.full.Next()
return true
}
fcur, rcur := rp.full.At(), rp.remove.At()
switch {
switch fcur, rcur := rp.full.At(), rp.remove.At(); {
case fcur < rcur:
rp.cur = fcur
rp.fok = rp.full.Next()
@ -841,9 +846,10 @@ func (it *bigEndianPostings) Err() error {
func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
h := make(postingsWithIndexHeap, 0, len(candidates))
for idx, it := range candidates {
if it.Next() {
switch {
case it.Next():
h = append(h, postingsWithIndex{index: idx, p: it})
} else if it.Err() != nil {
case it.Err() != nil:
return nil, it.Err()
}
}

View file

@ -54,7 +54,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
p.m["a"][v] = l
}
p.EnsureOrder()
p.EnsureOrder(0)
for _, e := range p.m {
for _, l := range e {
@ -114,7 +114,7 @@ func BenchmarkMemPostings_ensureOrder(b *testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
p.EnsureOrder()
p.EnsureOrder(0)
p.ordered = false
}
})

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package tsdb
import (
@ -122,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
}
}
// There is nothing to do if we did not collect any chunk
// There is nothing to do if we did not collect any chunk.
if len(tmpChks) == 0 {
return nil
}
@ -135,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// chunks Meta the first chunk that overlaps with others.
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
// to return chunk Metas for chunk 5 and chunk 6
// return chunk Metas for chunk 5 and chunk 6.
*chks = append(*chks, tmpChks[0])
maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk"
maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
for _, c := range tmpChks[1:] {
if c.MinTime > maxTime {
switch {
case c.MinTime > maxTime:
*chks = append(*chks, c)
maxTime = c.MaxTime
} else if c.MaxTime > maxTime {
case c.MaxTime > maxTime:
maxTime = c.MaxTime
(*chks)[len(*chks)-1].MaxTime = c.MaxTime
}
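
The loop above is an interval merge over chunks pre-sorted by MinTime. A self-contained sketch of the same logic using the example from the comment (plain structs stand in for chunks.Meta):

package main

import "fmt"

type meta struct{ minT, maxT int64 }

// mergeOverlaps assumes the input is sorted by minT and collapses any
// chunk overlapping the running maxTime into the previous entry, as in
// the loop above. Values follow the comment's example: 5:(100,200),
// 7:(150,250), 6:(500,600), 8:(550,650).
func mergeOverlaps(sorted []meta) []meta {
	out := []meta{sorted[0]}
	maxTime := sorted[0].maxT
	for _, c := range sorted[1:] {
		switch {
		case c.minT > maxTime:
			out = append(out, c)
			maxTime = c.maxT
		case c.maxT > maxTime:
			maxTime = c.maxT
			out[len(out)-1].maxT = c.maxT
		}
	}
	return out
}

func main() {
	chks := []meta{{100, 200}, {150, 250}, {500, 600}, {550, 650}}
	fmt.Println(mergeOverlaps(chks)) // [{100 250} {500 650}]
}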

View file

@ -1087,7 +1087,7 @@ func newNopChunkReader() ChunkReader {
}
}
func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) {
return cr.emptyChunk, nil
}

View file

@ -250,7 +250,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
ss := q.Select(sorted, nil, matcher)
for ss.Next() {
for ss.Next() { // nolint:revive
}
require.NoError(b, ss.Err())
}

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package tsdb
import (

View file

@ -190,9 +190,10 @@ type Stone struct {
func ReadTombstones(dir string) (Reader, int64, error) {
b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename))
if os.IsNotExist(err) {
switch {
case os.IsNotExist(err):
return NewMemTombstones(), 0, nil
} else if err != nil {
case err != nil:
return nil, 0, err
}

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package tsdb
import (
@ -521,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
}
}()
if n, err := f.Read(metab); err != nil {
switch n, err := f.Read(metab); {
case err != nil:
return nil, errors.Wrapf(err, "validate meta %q", f.Name())
} else if n != 8 {
case n != 8:
return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
}
@ -1062,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
tr := io.TeeReader(cr, r.crc32)
b := make([]byte, 6)
if n, err := tr.Read(b); err != nil {
switch n, err := tr.Read(b); {
case err != nil:
return 0, 0, nil, err
} else if n != 6 {
case n != 6:
return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n)
}
@ -1086,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
}
buf := r.buf[:length]
if n, err := tr.Read(buf); err != nil {
switch n, err := tr.Read(buf); {
case err != nil:
return 0, 0, nil, err
} else if n != length {
case n != length:
return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n)
}
if n, err := cr.Read(b[:4]); err != nil {
switch n, err := cr.Read(b[:4]); {
case err != nil:
return 0, 0, nil, err
} else if n != 4 {
case n != 4:
return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n)
}
if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp {

View file

@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool {
// we return EOF and the user can try again later. If we have a full
// page, buildRecord is guaranteed to return a record or a non-EOF; it
// has checks that the records fit in pages.
if ok, err := r.buildRecord(); ok {
switch ok, err := r.buildRecord(); {
case ok:
return true
} else if err != nil && err != io.EOF {
case err != nil && err != io.EOF:
r.err = err
return false
}

View file

@ -533,7 +533,7 @@ func TestReaderData(t *testing.T) {
require.NoError(t, err)
reader := fn(sr)
for reader.Next() {
for reader.Next() { // nolint:revive
}
require.NoError(t, reader.Err())

View file

@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Ignore errors reading to end of segment whilst replaying the WAL.
if !tail {
if err != nil && errors.Cause(err) != io.EOF {
switch {
case err != nil && errors.Cause(err) != io.EOF:
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
} else if reader.Offset() != size {
case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
}
return nil
@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
// Ignore all errors reading to end of segment whilst replaying the WAL.
if !tail {
if err != nil && errors.Cause(err) != io.EOF {
switch {
case err != nil && errors.Cause(err) != io.EOF:
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
} else if reader.Offset() != size {
case reader.Offset() != size:
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
}
return nil

View file

@ -164,7 +164,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
sr := NewSegmentBufReader(s)
require.NoError(t, err)
r := NewReader(sr)
for r.Next() {
for r.Next() { // nolint:revive
}
// Close the segment so we don't break things on Windows.

View file

@ -22,7 +22,7 @@ import (
type counter int
func (c *counter) Log(keyvals ...interface{}) error {
func (c *counter) Log(...interface{}) error {
(*c)++
return nil
}

View file

@ -37,6 +37,6 @@ func (c *MockContext) Err() error {
}
// Value ignores the Value and always returns nil
func (c *MockContext) Value(key interface{}) interface{} {
func (c *MockContext) Value(interface{}) interface{} {
return nil
}

View file

@ -22,7 +22,7 @@ type roundTrip struct {
theError error
}
func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) {
return rt.theResponse, rt.theError
}

View file

@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() {
tc.stop <- struct{}{}
go func() {
// Drain tc.head.events so that goroutines can make progress and exit.
for range tc.head.events {
for range tc.head.events { // nolint:revive
}
}()
go func() {
@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) {
node = childNode
}
err := tc.recursiveNodeUpdate(ev.Path, node)
if err != nil {
switch err := tc.recursiveNodeUpdate(ev.Path, node); {
case err != nil:
level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err)
failure()
} else if tc.head.data == nil {
case tc.head.data == nil:
level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix)
failure()
}
@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) {
func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error {
data, _, dataWatcher, err := tc.conn.GetW(path)
if errors.Is(err, zk.ErrNoNode) {
switch {
case errors.Is(err, zk.ErrNoNode):
tc.recursiveDelete(path, node)
if node == tc.head {
return fmt.Errorf("path %s does not exist", path)
}
return nil
} else if err != nil {
case err != nil:
return err
}
@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
}
children, _, childWatcher, err := tc.conn.ChildrenW(path)
if errors.Is(err, zk.ErrNoNode) {
switch {
case errors.Is(err, zk.ErrNoNode):
tc.recursiveDelete(path, node)
return nil
} else if err != nil {
case err != nil:
return err
}
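
The `for range tc.head.events` loop above drains a channel so blocked senders can make progress and exit; revive flags the empty body, hence the nolint. A minimal sketch of the idiom:

package main

import "fmt"

func main() {
	events := make(chan int)
	done := make(chan struct{})
	go func() {
		for i := 0; i < 5; i++ {
			events <- i // the producer would block forever without a drain
		}
		close(events)
		close(done)
	}()
	// Discard everything still in flight so the producer can finish.
	for range events { // nolint:revive
	}
	<-done
	fmt.Println("drained")
}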

View file

@ -117,7 +117,7 @@ type RulesRetriever interface {
type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
if param != "" {
return stats.NewQueryStats(s)
}
@ -177,8 +177,8 @@ type TSDBAdminStats interface {
// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked.
type QueryEngine interface {
SetQueryLogger(l promql.QueryLogger)
NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error)
NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error)
NewInstantQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error)
NewRangeQuery(ctx context.Context, q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error)
}
// API can register a set of endpoints in a router and handle
@ -392,7 +392,7 @@ func invalidParamError(err error, parameter string) apiFuncResult {
}, nil, nil}
}
func (api *API) options(r *http.Request) apiFuncResult {
func (api *API) options(*http.Request) apiFuncResult {
return apiFuncResult{nil, nil, nil, nil}
}
@ -417,7 +417,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts)
qry, err := api.QueryEngine.NewInstantQuery(ctx, api.Queryable, opts, r.FormValue("query"), ts)
if err != nil {
return invalidParamError(err, "query")
}
@ -520,7 +520,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step)
qry, err := api.QueryEngine.NewRangeQuery(ctx, api.Queryable, opts, r.FormValue("query"), start, end, step)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
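
NewInstantQuery and NewRangeQuery now take the caller's context at construction time rather than only at execution. A reduced, hypothetical analogue of the interface shift (not the real promql signatures beyond what the hunk shows):

package main

import (
	"context"
	"fmt"
	"time"
)

// queryEngine is a reduced analogue of the QueryEngine interface above:
// the request context flows in when the query is built.
type queryEngine interface {
	NewInstantQuery(ctx context.Context, qs string, ts time.Time) (string, error)
}

type engine struct{}

func (engine) NewInstantQuery(ctx context.Context, qs string, ts time.Time) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err // construction can now observe cancellation early
	}
	return fmt.Sprintf("%s @ %s", qs, ts.Format(time.RFC3339)), nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var e queryEngine = engine{}
	q, err := e.NewInstantQuery(ctx, "up", time.Unix(0, 0).UTC())
	fmt.Println(q, err)
}
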
@ -989,12 +989,14 @@ func (api *API) targets(r *http.Request) apiFuncResult {
ScrapeURL: target.URL().String(),
GlobalURL: globalURL.String(),
LastError: func() string {
if err == nil && lastErrStr == "" {
switch {
case err == nil && lastErrStr == "":
return ""
} else if err != nil {
case err != nil:
return errors.Wrapf(err, lastErrStr).Error()
default:
return lastErrStr
}
return lastErrStr
}(),
LastScrape: target.LastScrape(),
LastScrapeDuration: target.LastScrapeDuration().Seconds(),
@ -1565,7 +1567,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult {
}{name}, nil, nil, nil}
}
func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
func (api *API) cleanTombstones(*http.Request) apiFuncResult {
if !api.enableAdmin {
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
}
@ -1764,7 +1766,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
stream.WriteObjectEnd()
}
func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool {
func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool {
return false
}
@ -1817,7 +1819,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
stream.WriteObjectEnd()
}
func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool {
func marshalSampleJSONIsEmpty(unsafe.Pointer) bool {
return false
}
@ -1841,7 +1843,7 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
stream.WriteArrayEnd()
}
func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
func marshalPointJSONIsEmpty(unsafe.Pointer) bool {
return false
}
@ -1878,6 +1880,6 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
stream.WriteObjectEnd()
}
func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
func marshalExemplarJSONEmpty(unsafe.Pointer) bool {
return false
}

View file

@ -2560,9 +2560,9 @@ type fakeDB struct {
err error
}
func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(dir string, withHead bool) error { return f.err }
func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(string, bool) error { return f.err }
func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
if err != nil {

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint:revive // Many unused function arguments in this file by design.
package v1
import (

View file

@ -388,13 +388,13 @@ func TestFederationWithNativeHistograms(t *testing.T) {
break
}
require.NoError(t, err)
if et == textparse.EntryHelp {
metricFamilies++
}
if et == textparse.EntryHistogram || et == textparse.EntrySeries {
p.Metric(&l)
}
if et == textparse.EntryHistogram {
switch et {
case textparse.EntryHelp:
metricFamilies++
case textparse.EntryHistogram:
_, parsedTimestamp, h, fh := p.Histogram()
require.Nil(t, h)
actVec = append(actVec, promql.Sample{
@ -402,7 +402,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
H: fh,
Metric: l,
})
} else if et == textparse.EntrySeries {
case textparse.EntrySeries:
_, parsedTimestamp, f := p.Series()
actVec = append(actVec, promql.Sample{
T: *parsedTimestamp,

View file

@ -755,14 +755,14 @@ func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
return math.NaN()
}
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
func (h *Handler) version(w http.ResponseWriter, _ *http.Request) {
dec := json.NewEncoder(w)
if err := dec.Encode(h.versionInfo); err != nil {
http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
func (h *Handler) quit(w http.ResponseWriter, _ *http.Request) {
var closed bool
h.quitOnce.Do(func() {
closed = true
@ -774,7 +774,7 @@ func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
}
}
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
func (h *Handler) reload(w http.ResponseWriter, _ *http.Request) {
rc := make(chan error)
h.reloadCh <- rc
if err := <-rc; err != nil {