Update exp package (#12650)

Goutham Veeramachaneni 2023-09-21 22:53:51 +02:00 committed by GitHub
parent f8dd8770ac
commit 86729d4d7b
22 changed files with 86 additions and 64 deletions
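
The change set follows from the golang.org/x/exp bump in go.mod below: in the newer revision, slices.SortFunc and slices.SortStableFunc no longer take a boolean less(a, b) callback but a three-way cmp(a, b) comparator that returns a negative number when a sorts before b, zero when they are equal, and a positive number otherwise (the same shape the standard-library slices package adopted in Go 1.21). Every call site in the diff is converted accordingly. A minimal sketch of the two shapes, written against golang.org/x/exp/slices as pinned here (the example itself is not part of the commit):

```go
package main

import (
    "fmt"
    "strings"

    "golang.org/x/exp/slices"
)

func main() {
    names := []string{"b", "c", "a"}

    // Old signature (before this update): a boolean "less" function.
    //   slices.SortFunc(names, func(a, b string) bool { return a < b })

    // New signature (after this update): a three-way comparator returning
    // a negative, zero, or positive int, e.g. strings.Compare for strings.
    slices.SortFunc(names, func(a, b string) int { return strings.Compare(a, b) })

    fmt.Println(names) // [a b c]
}
```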

@@ -459,7 +459,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
     postingInfos := []postingInfo{}
     printInfo := func(postingInfos []postingInfo) {
-        slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric })
+        slices.SortFunc(postingInfos, func(a, b postingInfo) int { return int(b.metric) - int(a.metric) })
         for i, pc := range postingInfos {
             if i >= limit {

go.mod (2 changes)

@@ -185,7 +185,7 @@ require (
     go.opentelemetry.io/otel/metric v1.16.0 // indirect
     go.opentelemetry.io/proto/otlp v1.0.0 // indirect
     golang.org/x/crypto v0.11.0 // indirect
-    golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
+    golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
     golang.org/x/mod v0.12.0 // indirect
     golang.org/x/term v0.10.0 // indirect
     golang.org/x/text v0.11.0 // indirect

go.sum (4 changes)

@@ -853,8 +853,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
+golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -19,6 +19,7 @@ import (
     "bytes"
     "encoding/json"
     "strconv"
+    "strings"
 
     "github.com/cespare/xxhash/v2"
     "github.com/prometheus/common/model"
@@ -362,7 +363,7 @@ func EmptyLabels() Labels {
 func New(ls ...Label) Labels {
     set := make(Labels, 0, len(ls))
     set = append(set, ls...)
-    slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
     return set
 }
@@ -386,7 +387,7 @@ func FromStrings(ss ...string) Labels {
         res = append(res, Label{Name: ss[i], Value: ss[i+1]})
     }
-    slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
     return res
 }
@@ -591,7 +592,7 @@ func (b *Builder) Labels() Labels {
     }
     if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it.
         res = append(res, b.add...)
-        slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
+        slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
     }
     return res
 }
@@ -618,7 +619,7 @@ func (b *ScratchBuilder) Add(name, value string) {
 // Sort the labels added so far by name.
 func (b *ScratchBuilder) Sort() {
-    slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
 }
 
 // Assign is for when you already have a Labels which you want this ScratchBuilder to return.

@@ -20,6 +20,7 @@ import (
     "encoding/json"
     "reflect"
     "strconv"
+    "strings"
     "unsafe"
 
     "github.com/cespare/xxhash/v2"
@@ -412,7 +413,7 @@ func yoloBytes(s string) (b []byte) {
 // New returns a sorted Labels from the given labels.
 // The caller has to guarantee that all label names are unique.
 func New(ls ...Label) Labels {
-    slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
     size := labelsSize(ls)
     buf := make([]byte, size)
     marshalLabelsToSizedBuffer(ls, buf)
@@ -671,7 +672,7 @@ func (b *Builder) Labels() Labels {
         return b.base
     }
-    slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
     slices.Sort(b.del)
     a, d := 0, 0
@@ -830,7 +831,7 @@ func (b *ScratchBuilder) Add(name, value string) {
 // Sort the labels added so far by name.
 func (b *ScratchBuilder) Sort() {
-    slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
+    slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
 }
 
 // Assign is for when you already have a Labels which you want this ScratchBuilder to return.

@@ -81,8 +81,15 @@ func bucketQuantile(q float64, buckets buckets) float64 {
     if q > 1 {
         return math.Inf(+1)
     }
-    slices.SortFunc(buckets, func(a, b bucket) bool {
-        return a.upperBound < b.upperBound
+    slices.SortFunc(buckets, func(a, b bucket) int {
+        // We don't expect the bucket boundary to be a NaN.
+        if a.upperBound < b.upperBound {
+            return -1
+        }
+        if a.upperBound > b.upperBound {
+            return +1
+        }
+        return 0
     })
     if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
         return math.NaN()
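
The bucket upper bounds are float64 values, so this comparator spells out the three-way result instead of subtracting: a cast such as int(a.upperBound - b.upperBound) would truncate fractional differences to zero and could misorder buckets. A standalone sketch of the same idea as a helper (the name compareFloat64 is illustrative, not from the commit); Go 1.21's cmp.Compare behaves the same for non-NaN inputs but additionally orders NaN before every other value:

```go
package main

import "fmt"

// compareFloat64 is a three-way comparator for float64 values that are
// assumed not to be NaN, matching the assumption stated in the diff above.
func compareFloat64(a, b float64) int {
    if a < b {
        return -1
    }
    if a > b {
        return +1
    }
    return 0
}

func main() {
    fmt.Println(compareFloat64(0.5, 1.0), compareFloat64(2.5, 2.5), compareFloat64(3, 1)) // -1 0 1
}
```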

@@ -19,6 +19,7 @@ import (
     "fmt"
     "math"
     "net/url"
+    "strings"
     "sync"
     "time"
@@ -490,9 +491,11 @@ func (g *Group) AlertingRules() []*AlertingRule {
             alerts = append(alerts, alertingRule)
         }
     }
-    slices.SortFunc(alerts, func(a, b *AlertingRule) bool {
-        return a.State() > b.State() ||
-            (a.State() == b.State() && a.Name() < b.Name())
+    slices.SortFunc(alerts, func(a, b *AlertingRule) int {
+        if a.State() == b.State() {
+            return strings.Compare(a.Name(), b.Name())
+        }
+        return int(b.State() - a.State())
     })
     return alerts
 }
@@ -1203,11 +1206,15 @@ func (m *Manager) RuleGroups() []*Group {
         rgs = append(rgs, g)
     }
 
-    slices.SortFunc(rgs, func(a, b *Group) bool {
-        if a.file != b.file {
-            return a.file < b.file
+    slices.SortFunc(rgs, func(a, b *Group) int {
+        fileCompare := strings.Compare(a.file, b.file)
+
+        // If its 0, then the file names are the same.
+        // Lets look at the group names in that case.
+        if fileCompare != 0 {
+            return fileCompare
         }
-        return a.name < b.name
+        return strings.Compare(a.name, b.name)
     })
     return rgs
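
Both comparators in this file compose two keys: AlertingRules orders by state first (descending, so firing rules sort before pending and inactive ones; the rule states are a small integer enum, so int(b.State() - a.State()) cannot overflow) and by name on ties, while RuleGroups orders by file and then by group name. The same fall-through-on-equality chaining can be written with cmp.Or, which only arrived later in Go 1.22, so this is an illustrative aside (the group type below is a stand-in, not the real *Group):

```go
package main

import (
    "cmp"
    "fmt"
    "strings"
)

type group struct{ file, name string }

// compareGroups mirrors the two-key comparison above: the file is the
// primary key and the group name breaks ties. cmp.Or returns its first
// non-zero argument, i.e. it falls through to the next key on equality.
func compareGroups(a, b group) int {
    return cmp.Or(
        strings.Compare(a.file, b.file),
        strings.Compare(a.name, b.name),
    )
}

func main() {
    fmt.Println(compareGroups(group{"a.yml", "x"}, group{"a.yml", "y"})) // -1
}
```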

@@ -732,8 +732,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
 }
 
 func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
-    slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool {
-        return len(a.Name) < len(b.Name)
+    slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int {
+        return len(a.Name) - len(b.Name)
     })
 
     for _, l := range conflictingExposedLabels {
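
Here the sort keys are label-name lengths, which are non-negative ints, so the plain subtraction len(a.Name) - len(b.Name) cannot overflow, and SortStableFunc keeps names of equal length in their original relative order. A self-contained sketch of that behaviour (example data only):

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    // Stable sort by string length: subtracting two non-negative lengths
    // is a valid three-way comparator, and equal lengths keep their
    // original relative order because the sort is stable.
    names := []string{"job", "instance", "a", "up"}
    slices.SortStableFunc(names, func(a, b string) int { return len(a) - len(b) })
    fmt.Println(names) // [a up job instance]
}
```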

@@ -187,8 +187,8 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
     }
 
     if sortSeries {
-        slices.SortFunc(series, func(a, b storage.Series) bool {
-            return labels.Compare(a.Labels(), b.Labels()) < 0
+        slices.SortFunc(series, func(a, b storage.Series) int {
+            return labels.Compare(a.Labels(), b.Labels())
         })
     }
     return &concreteSeriesSet{

@@ -16,6 +16,7 @@ package remote
 import (
     "context"
     "net/http"
+    "strings"
     "sync"
 
     "github.com/go-kit/log"
@@ -93,8 +94,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
             Value: value,
         })
     }
 
-    slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool {
-        return a.Name < b.Name
+    slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) int {
+        return strings.Compare(a.Name, b.Name)
     })
 
     responseType, err := NegotiateResponseType(req.AcceptedResponseTypes)

@@ -200,8 +200,8 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
 }
 
 func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
-    slices.SortFunc(dms, func(a, b dirMeta) bool {
-        return a.meta.MinTime < b.meta.MinTime
+    slices.SortFunc(dms, func(a, b dirMeta) int {
+        return int(a.meta.MinTime - b.meta.MinTime)
     })
 
     res := c.selectOverlappingDirs(dms)
@@ -380,8 +380,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
     for s := range sources {
         res.Compaction.Sources = append(res.Compaction.Sources, s)
     }
-    slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool {
-        return a.Compare(b) < 0
+    slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) int {
+        return a.Compare(b)
     })
 
     res.MinTime = mint

@@ -579,8 +579,8 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
         return nil, nil
     }
 
-    slices.SortFunc(loadable, func(a, b *Block) bool {
-        return a.Meta().MinTime < b.Meta().MinTime
+    slices.SortFunc(loadable, func(a, b *Block) int {
+        return int(a.Meta().MinTime - b.Meta().MinTime)
     })
 
     blockMetas := make([]BlockMeta, 0, len(loadable))
@@ -1447,8 +1447,8 @@ func (db *DB) reloadBlocks() (err error) {
     }
     db.metrics.blocksBytes.Set(float64(blocksSize))
 
-    slices.SortFunc(toLoad, func(a, b *Block) bool {
-        return a.Meta().MinTime < b.Meta().MinTime
+    slices.SortFunc(toLoad, func(a, b *Block) int {
+        return int(a.Meta().MinTime - b.Meta().MinTime)
     })
 
     // Swap new blocks first for subsequently created readers to be seen.
@@ -1517,8 +1517,8 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
     // Sort the blocks by time - newest to oldest (largest to smallest timestamp).
     // This ensures that the retentions will remove the oldest blocks.
-    slices.SortFunc(blocks, func(a, b *Block) bool {
-        return a.Meta().MaxTime > b.Meta().MaxTime
+    slices.SortFunc(blocks, func(a, b *Block) int {
+        return int(b.Meta().MaxTime - a.Meta().MaxTime)
     })
 
     for _, block := range blocks {
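
The block comparators above order by int64 millisecond timestamps using a subtraction cast to int, e.g. int(a.Meta().MinTime - b.Meta().MinTime). That is compact, but it assumes the difference fits in int; an explicit comparison avoids that assumption entirely. The helper below is an illustrative alternative, not what the commit uses:

```go
package main

import "fmt"

// compareInt64 is an explicit three-way comparator for int64 values. Unlike
// int(a - b), it cannot overflow and does not depend on the width of int.
func compareInt64(a, b int64) int {
    switch {
    case a < b:
        return -1
    case a > b:
        return +1
    default:
        return 0
    }
}

func main() {
    fmt.Println(compareInt64(1_600_000_000_000, 1_700_000_000_000)) // -1
}
```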

@@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
         }
     }
 
-    slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool {
-        return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0
+    slices.SortFunc(ret, func(a, b exemplar.QueryResult) int {
+        return labels.Compare(a.SeriesLabels, b.SeriesLabels)
     })
 
     return ret, nil

@@ -136,8 +136,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
         return index.ErrPostings(errors.Wrap(err, "expand postings"))
     }
 
-    slices.SortFunc(series, func(a, b *memSeries) bool {
-        return labels.Compare(a.lset, b.lset) < 0
+    slices.SortFunc(series, func(a, b *memSeries) int {
+        return labels.Compare(a.lset, b.lset)
     })
 
     // Convert back to list.

@@ -19,6 +19,7 @@ import (
     "encoding/binary"
     "runtime"
     "sort"
+    "strings"
     "sync"
 
     "github.com/pkg/errors"
@@ -108,11 +109,14 @@ func (p *MemPostings) SortedKeys() []labels.Label {
     }
     p.mtx.RUnlock()
 
-    slices.SortFunc(keys, func(a, b labels.Label) bool {
-        if a.Name != b.Name {
-            return a.Name < b.Name
+    slices.SortFunc(keys, func(a, b labels.Label) int {
+        nameCompare := strings.Compare(a.Name, b.Name)
+        // If names are the same, compare values.
+        if nameCompare != 0 {
+            return nameCompare
         }
-        return a.Value < b.Value
+
+        return strings.Compare(a.Value, b.Value)
     })
     return keys
 }

@@ -63,8 +63,6 @@ func (m *maxHeap) push(item Stat) {
 }
 
 func (m *maxHeap) get() []Stat {
-    slices.SortFunc(m.Items, func(a, b Stat) bool {
-        return a.Count > b.Count
-    })
+    slices.SortFunc(m.Items, func(a, b Stat) int { return int(b.Count - a.Count) })
     return m.Items
 }

@@ -177,18 +177,18 @@ type chunkMetaAndChunkDiskMapperRef struct {
     origMaxT int64
 }
 
-func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool {
+func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
     if a.meta.MinTime == b.meta.MinTime {
-        return a.meta.Ref < b.meta.Ref
+        return int(a.meta.Ref - b.meta.Ref)
     }
-    return a.meta.MinTime < b.meta.MinTime
+    return int(a.meta.MinTime - b.meta.MinTime)
 }
 
-func lessByMinTimeAndMinRef(a, b chunks.Meta) bool {
+func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
     if a.MinTime == b.MinTime {
-        return a.Ref < b.Ref
+        return int(a.Ref - b.Ref)
     }
-    return a.MinTime < b.MinTime
+    return int(a.MinTime - b.MinTime)
 }
 
 func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
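
These two helpers are named comparators rather than inline closures; with the new func(a, b T) int shape they can presumably be handed to slices.SortFunc directly elsewhere in this file. A self-contained sketch of that usage with a simplified stand-in for chunks.Meta (the meta type and sample data are illustrative):

```go
package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

// meta stands in for chunks.Meta: order by MinTime, break ties by Ref,
// which is the ordering lessByMinTimeAndMinRef encodes above.
type meta struct {
    MinTime int64
    Ref     uint64
}

func lessByMinTimeAndMinRef(a, b meta) int {
    if a.MinTime == b.MinTime {
        return int(a.Ref - b.Ref)
    }
    return int(a.MinTime - b.MinTime)
}

func main() {
    metas := []meta{{MinTime: 2, Ref: 1}, {MinTime: 1, Ref: 9}, {MinTime: 1, Ref: 3}}
    // A named comparator with the func(a, b T) int shape plugs straight
    // into slices.SortFunc.
    slices.SortFunc(metas, lessByMinTimeAndMinRef)
    fmt.Println(metas) // [{1 3} {1 9} {2 1}]
}
```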

@@ -279,8 +279,12 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc
     // there is no chance that the set we subtract from
     // contains postings of series that didn't exist when
     // we constructed the set we subtract by.
-    slices.SortStableFunc(ms, func(i, j *labels.Matcher) bool {
-        return !isSubtractingMatcher(i) && isSubtractingMatcher(j)
+    slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
+        if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
+            return -1
+        }
+
+        return +1
     })
 
     for _, m := range ms {

@@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
         refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
     }
 
-    slices.SortFunc(refs, func(a, b checkpointRef) bool {
-        return a.index < b.index
+    slices.SortFunc(refs, func(a, b checkpointRef) int {
+        return a.index - b.index
     })
 
     return refs, nil

@@ -909,8 +909,8 @@ func listSegments(dir string) (refs []segmentRef, err error) {
         }
         refs = append(refs, segmentRef{name: fn, index: k})
     }
-    slices.SortFunc(refs, func(a, b segmentRef) bool {
-        return a.index < b.index
+    slices.SortFunc(refs, func(a, b segmentRef) int {
+        return a.index - b.index
     })
     for i := 0; i < len(refs)-1; i++ {
         if refs[i].index+1 != refs[i+1].index {

@@ -85,9 +85,7 @@ func (t *TimerGroup) String() string {
     for _, timer := range t.timers {
         timers = append(timers, timer)
     }
-    slices.SortFunc(timers, func(a, b *Timer) bool {
-        return a.created < b.created
-    })
+    slices.SortFunc(timers, func(a, b *Timer) int { return a.created - b.created })
     result := &bytes.Buffer{}
     for _, timer := range timers {
         fmt.Fprintf(result, "%s\n", timer)

@@ -17,6 +17,7 @@ import (
     "fmt"
     "net/http"
     "sort"
+    "strings"
 
     "github.com/go-kit/log/level"
     "github.com/gogo/protobuf/proto"
@@ -169,10 +170,10 @@ Loop:
         return
     }
 
-    slices.SortFunc(vec, func(a, b promql.Sample) bool {
+    slices.SortFunc(vec, func(a, b promql.Sample) int {
         ni := a.Metric.Get(labels.MetricName)
         nj := b.Metric.Get(labels.MetricName)
-        return ni < nj
+        return strings.Compare(ni, nj)
     })
 
     externalLabels := h.config.GlobalConfig.ExternalLabels.Map()