chore: enable all rules of the perfsprint linter

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
This commit is contained in:
Matthieu MOREL 2024-11-07 08:37:24 +01:00
parent cd1f8ac129
commit 2cc716d2cf
37 changed files with 62 additions and 58 deletions

View file

@ -108,8 +108,16 @@ linters-settings:
gofumpt: gofumpt:
extra-rules: true extra-rules: true
perfsprint: perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
# Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
err-error: true
# Optimizes `fmt.Errorf`. # Optimizes `fmt.Errorf`.
errorf: true errorf: true
# Optimizes `fmt.Sprintf` with only one argument.
sprintf1: true
# Optimizes into strings concatenation.
strconcat: false
revive: revive:
# By default, revive will enable only the linting rules that are named in the configuration file. # By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly enable all required rules here. # So, it's needed to explicitly enable all required rules here.

View file

@ -153,7 +153,7 @@ func init() {
// serverOnlyFlag creates server-only kingpin flag. // serverOnlyFlag creates server-only kingpin flag.
func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)). return app.Flag(name, help+" Use with server mode only.").
PreAction(func(parseContext *kingpin.ParseContext) error { PreAction(func(parseContext *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user. // This will be invoked only if flag is actually provided by user.
serverOnlyFlags = append(serverOnlyFlags, "--"+name) serverOnlyFlags = append(serverOnlyFlags, "--"+name)
@ -163,7 +163,7 @@ func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCl
// agentOnlyFlag creates agent-only kingpin flag. // agentOnlyFlag creates agent-only kingpin flag.
func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause { func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)). return app.Flag(name, help+" Use with agent mode only.").
PreAction(func(parseContext *kingpin.ParseContext) error { PreAction(func(parseContext *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user. // This will be invoked only if flag is actually provided by user.
agentOnlyFlags = append(agentOnlyFlags, "--"+name) agentOnlyFlags = append(agentOnlyFlags, "--"+name)

View file

@ -60,7 +60,7 @@ func (p *queryLogTest) skip(t *testing.T) {
t.Skip("changing prefix has no effect on rules") t.Skip("changing prefix has no effect on rules")
} }
// Some systems don't support IPv4 or IPv6. // Some systems don't support IPv4 or IPv6.
l, err := net.Listen("tcp", fmt.Sprintf("%s:0", p.host)) l, err := net.Listen("tcp", p.host+":0")
if err != nil { if err != nil {
t.Skip("ip version not supported") t.Skip("ip version not supported")
} }

View file

@ -71,7 +71,7 @@ const (
) )
var ( var (
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = "Prometheus/" + version.Version
// DefaultSDConfig is the default Azure SD configuration. // DefaultSDConfig is the default Azure SD configuration.
DefaultSDConfig = SDConfig{ DefaultSDConfig = SDConfig{

View file

@ -132,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove
Transport: rt, Transport: rt,
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
}, },
godo.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), godo.SetUserAgent("Prometheus/"+version.Version),
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) return nil, fmt.Errorf("error setting up digital ocean agent: %w", err)

View file

@ -56,7 +56,7 @@ const tokenID = "3c9a75a2-24fd-4508-b4f2-11f18aa97411"
// HandleDropletsList mocks droplet list. // HandleDropletsList mocks droplet list.
func (m *SDMock) HandleDropletsList() { func (m *SDMock) HandleDropletsList() {
m.Mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { if r.Header.Get("Authorization") != "Bearer "+tokenID {
w.WriteHeader(http.StatusForbidden) w.WriteHeader(http.StatusForbidden)
return return
} }

View file

@ -23,7 +23,7 @@ import (
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
) )
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var userAgent = "Prometheus/" + version.Version
type Applications struct { type Applications struct {
VersionsDelta int `xml:"versions__delta"` VersionsDelta int `xml:"versions__delta"`

View file

@ -56,7 +56,7 @@ const hcloudTestToken = "LRK9DAWQ1ZAEFSrCNEEzLCUwhYX1U3g7wMg4dTlkkDC96fyDuyJ39nV
// HandleHcloudServers mocks the cloud servers list endpoint. // HandleHcloudServers mocks the cloud servers list endpoint.
func (m *SDMock) HandleHcloudServers() { func (m *SDMock) HandleHcloudServers() {
m.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", hcloudTestToken) { if r.Header.Get("Authorization") != "Bearer "+hcloudTestToken {
w.WriteHeader(http.StatusUnauthorized) w.WriteHeader(http.StatusUnauthorized)
return return
} }
@ -432,7 +432,7 @@ func (m *SDMock) HandleHcloudServers() {
// HandleHcloudNetworks mocks the cloud networks list endpoint. // HandleHcloudNetworks mocks the cloud networks list endpoint.
func (m *SDMock) HandleHcloudNetworks() { func (m *SDMock) HandleHcloudNetworks() {
m.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", hcloudTestToken) { if r.Header.Get("Authorization") != "Bearer "+hcloudTestToken {
w.WriteHeader(http.StatusUnauthorized) w.WriteHeader(http.StatusUnauthorized)
return return
} }

View file

@ -39,7 +39,7 @@ const (
hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled" hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled"
) )
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var userAgent = "Prometheus/" + version.Version
// Discovery periodically performs Hetzner Robot requests. It implements // Discovery periodically performs Hetzner Robot requests. It implements
// the Discoverer interface. // the Discoverer interface.

View file

@ -44,7 +44,7 @@ var (
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(60 * time.Second), RefreshInterval: model.Duration(60 * time.Second),
} }
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = "Prometheus/" + version.Version
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
) )

View file

@ -15,7 +15,6 @@ package ionos
import ( import (
"context" "context"
"fmt"
"log/slog" "log/slog"
"net" "net"
"net/http" "net/http"
@ -77,7 +76,7 @@ func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error
Transport: rt, Transport: rt,
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
} }
cfg.UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) cfg.UserAgent = "Prometheus/" + version.Version
d.client = ionoscloud.NewAPIClient(cfg) d.client = ionoscloud.NewAPIClient(cfg)

View file

@ -98,7 +98,7 @@ func mockIONOSServers(w http.ResponseWriter, r *http.Request) {
return return
} }
if r.URL.Path != fmt.Sprintf("%s/datacenters/%s/servers", ionoscloud.DefaultIonosBasePath, ionosTestDatacenterID) { if r.URL.Path != fmt.Sprintf("%s/datacenters/%s/servers", ionoscloud.DefaultIonosBasePath, ionosTestDatacenterID) {
http.Error(w, fmt.Sprintf("bad url: %s", r.URL.Path), http.StatusNotFound) http.Error(w, "bad url: "+r.URL.Path, http.StatusNotFound)
return return
} }
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")

View file

@ -61,7 +61,7 @@ const (
var ( var (
// Http header. // Http header.
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = "Prometheus/" + version.Version
// DefaultSDConfig is the default Kubernetes SD configuration. // DefaultSDConfig is the default Kubernetes SD configuration.
DefaultSDConfig = SDConfig{ DefaultSDConfig = SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,

View file

@ -15,7 +15,6 @@ package kubernetes
import ( import (
"context" "context"
"fmt"
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -55,7 +54,7 @@ func makeMultiPortService() *v1.Service {
func makeSuffixedService(suffix string) *v1.Service { func makeSuffixedService(suffix string) *v1.Service {
return &v1.Service{ return &v1.Service{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("testservice%s", suffix), Name: "testservice" + suffix,
Namespace: "default", Namespace: "default",
}, },
Spec: v1.ServiceSpec{ Spec: v1.ServiceSpec{

View file

@ -165,7 +165,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
}, },
) )
client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)) client.SetUserAgent("Prometheus/" + version.Version)
d.client = &client d.client = &client
d.Discovery = refresh.NewDiscovery( d.Discovery = refresh.NewDiscovery(

View file

@ -15,7 +15,6 @@ package linode
import ( import (
"encoding/json" "encoding/json"
"fmt"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os" "os"
@ -56,7 +55,7 @@ func (m *SDMock) Setup() {
func (m *SDMock) SetupHandlers() { func (m *SDMock) SetupHandlers() {
for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} { for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} {
m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { if r.Header.Get("Authorization") != "Bearer "+tokenID {
w.WriteHeader(http.StatusUnauthorized) w.WriteHeader(http.StatusUnauthorized)
return return
} }

View file

@ -38,7 +38,7 @@ const (
swarmLabel = model.MetaLabelPrefix + "dockerswarm_" swarmLabel = model.MetaLabelPrefix + "dockerswarm_"
) )
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var userAgent = "Prometheus/" + version.Version
// DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration. // DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration.
var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{ var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{

View file

@ -64,7 +64,7 @@ var (
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
} }
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`) matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = "Prometheus/" + version.Version
) )
func init() { func init() {

View file

@ -93,7 +93,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) {
Transport: rt, Transport: rt,
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
}), }),
scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), scw.WithUserAgent("Prometheus/"+version.Version),
scw.WithProfile(profile), scw.WithProfile(profile),
) )
if err != nil { if err != nil {

View file

@ -104,7 +104,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
Transport: rt, Transport: rt,
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
}), }),
scw.WithUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)), scw.WithUserAgent("Prometheus/"+version.Version),
scw.WithProfile(profile), scw.WithProfile(profile),
) )
if err != nil { if err != nil {

View file

@ -55,7 +55,7 @@ const APIKey = "ABCBTDG35OTGH2UKCC3S6CNMDUPCN3ZWSGFQ"
// HandleInstanceList mocks vultr instances list. // HandleInstanceList mocks vultr instances list.
func (m *SDMock) HandleInstanceList() { func (m *SDMock) HandleInstanceList() {
m.Mux.HandleFunc("/v2/instances", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/v2/instances", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", APIKey) { if r.Header.Get("Authorization") != "Bearer "+APIKey {
w.WriteHeader(http.StatusForbidden) w.WriteHeader(http.StatusForbidden)
return return
} }

View file

@ -135,7 +135,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove
Timeout: time.Duration(conf.RefreshInterval), Timeout: time.Duration(conf.RefreshInterval),
}) })
d.client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)) d.client.SetUserAgent("Prometheus/" + version.Version)
if err != nil { if err != nil {
return nil, fmt.Errorf("error setting up vultr agent: %w", err) return nil, fmt.Errorf("error setting up vultr agent: %w", err)

View file

@ -30,7 +30,7 @@ import (
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
) )
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var userAgent = "Prometheus/" + version.Version
// ResourceClient exposes the xDS protocol for a single resource type. // ResourceClient exposes the xDS protocol for a single resource type.
// See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#rest-json-polling-subscriptions . // See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#rest-json-polling-subscriptions .
@ -140,7 +140,7 @@ func makeXDSResourceHTTPEndpointURL(protocolVersion ProtocolVersion, serverURL *
return nil, errors.New("invalid xDS server URL protocol. must be either 'http' or 'https'") return nil, errors.New("invalid xDS server URL protocol. must be either 'http' or 'https'")
} }
serverURL.Path = path.Join(serverURL.Path, string(protocolVersion), fmt.Sprintf("discovery:%s", resourceType)) serverURL.Path = path.Join(serverURL.Path, string(protocolVersion), "discovery:"+resourceType)
return serverURL, nil return serverURL, nil
} }

View file

@ -56,7 +56,7 @@ const (
alertmanagerLabel = "alertmanager" alertmanagerLabel = "alertmanager"
) )
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var userAgent = "Prometheus/" + version.Version
// Alert is a generic representation of an alert in the Prometheus eco-system. // Alert is a generic representation of an alert in the Prometheus eco-system.
type Alert struct { type Alert struct {

View file

@ -571,7 +571,7 @@ func BenchmarkParser(b *testing.B) {
}) })
} }
for _, c := range errCases { for _, c := range errCases {
name := fmt.Sprintf("%s (should fail)", c) name := c + " (should fail)"
b.Run(name, func(b *testing.B) { b.Run(name, func(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {

View file

@ -102,15 +102,15 @@ type (
) )
func (e ErrQueryTimeout) Error() string { func (e ErrQueryTimeout) Error() string {
return fmt.Sprintf("query timed out in %s", string(e)) return "query timed out in " + string(e)
} }
func (e ErrQueryCanceled) Error() string { func (e ErrQueryCanceled) Error() string {
return fmt.Sprintf("query was canceled in %s", string(e)) return "query was canceled in " + string(e)
} }
func (e ErrTooManySamples) Error() string { func (e ErrTooManySamples) Error() string {
return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) return "query processing would load too many samples into memory in " + string(e)
} }
func (e ErrStorage) Error() string { func (e ErrStorage) Error() string {

View file

@ -58,7 +58,7 @@ func (e *AggregateExpr) Pretty(level int) string {
s += "(\n" s += "(\n"
if e.Op.IsAggregatorWithParam() { if e.Op.IsAggregatorWithParam() {
s += fmt.Sprintf("%s,\n", e.Param.Pretty(level+1)) s += e.Param.Pretty(level+1) + ",\n"
} }
s += fmt.Sprintf("%s\n%s)", e.Expr.Pretty(level+1), indent(level)) s += fmt.Sprintf("%s\n%s)", e.Expr.Pretty(level+1), indent(level))
return s return s
@ -97,7 +97,7 @@ func (e Expressions) Pretty(level int) string {
// Do not prefix the indent since respective nodes will indent itself. // Do not prefix the indent since respective nodes will indent itself.
s := "" s := ""
for i := range e { for i := range e {
s += fmt.Sprintf("%s,\n", e[i].Pretty(level)) s += e[i].Pretty(level) + ",\n"
} }
return s[:len(s)-2] return s[:len(s)-2]
} }

View file

@ -589,7 +589,7 @@ func (r *AlertingRule) String() string {
byt, err := yaml.Marshal(ar) byt, err := yaml.Marshal(ar)
if err != nil { if err != nil {
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error()) return "error marshaling alerting rule: " + err.Error()
} }
return string(byt) return string(byt)

View file

@ -15,7 +15,6 @@ package rules
import ( import (
"context" "context"
"fmt"
"io/fs" "io/fs"
"math" "math"
"os" "os"
@ -195,7 +194,7 @@ func TestAlertingRule(t *testing.T) {
func TestForStateAddSamples(t *testing.T) { func TestForStateAddSamples(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} { for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) { t.Run("queryOffset "+queryOffset.String(), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, ` storage := promqltest.LoadedStorage(t, `
load 5m load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85
@ -357,7 +356,7 @@ func sortAlerts(items []*Alert) {
func TestForStateRestore(t *testing.T) { func TestForStateRestore(t *testing.T) {
for _, queryOffset := range []time.Duration{0, time.Minute} { for _, queryOffset := range []time.Duration{0, time.Minute} {
t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) { t.Run("queryOffset "+queryOffset.String(), func(t *testing.T) {
storage := promqltest.LoadedStorage(t, ` storage := promqltest.LoadedStorage(t, `
load 5m load 5m
http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120 http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120
@ -842,7 +841,7 @@ func TestUpdate(t *testing.T) {
// Change group rules and reload. // Change group rules and reload.
for i, g := range rgs.Groups { for i, g := range rgs.Groups {
for j, r := range g.Rules { for j, r := range g.Rules {
rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value)) rgs.Groups[i].Rules[j].Expr.SetString(r.Expr.Value + " * 0")
} }
} }
reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs) reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)

View file

@ -786,7 +786,7 @@ func acceptEncodingHeader(enableCompression bool) string {
return "identity" return "identity"
} }
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) var UserAgent = "Prometheus/" + version.Version
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
if s.req == nil { if s.req == nil {

View file

@ -66,7 +66,7 @@ const (
var ( var (
// UserAgent represents Prometheus version to use for user agent header. // UserAgent represents Prometheus version to use for user agent header.
UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) UserAgent = "Prometheus/" + version.Version
remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{ remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.

View file

@ -184,7 +184,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, errors.New("did not expect to start a second chunk") return emptyChunk, errors.New("did not expect to start a second chunk")
} }
default: default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) panic("unknown sample type " + sampleType.String())
} }
} }
return Meta{ return Meta{

View file

@ -1645,7 +1645,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
lbls := labels.FromStrings( lbls := labels.FromStrings(
"__name__", fmt.Sprintf("rpc_durations_%d_histogram_seconds", i), "__name__", fmt.Sprintf("rpc_durations_%d_histogram_seconds", i),
"instance", "localhost:8080", "instance", "localhost:8080",
"job", fmt.Sprintf("sparse_histogram_schema_%s", schemaDescription[sid]), "job", "sparse_histogram_schema_"+schemaDescription[sid],
) )
allSparseSeries = append(allSparseSeries, struct { allSparseSeries = append(allSparseSeries, struct {
baseLabels labels.Labels baseLabels labels.Labels

View file

@ -5673,7 +5673,7 @@ func testQuerierOOOQuery(t *testing.T,
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
opts.OutOfOrderCapMax = tc.oooCap opts.OutOfOrderCapMax = tc.oooCap
db := openTestDB(t, opts, nil) db := openTestDB(t, opts, nil)
db.DisableCompactions() db.DisableCompactions()
@ -6004,7 +6004,7 @@ func testChunkQuerierOOOQuery(t *testing.T,
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
opts.OutOfOrderCapMax = tc.oooCap opts.OutOfOrderCapMax = tc.oooCap
db := openTestDB(t, opts, nil) db := openTestDB(t, opts, nil)
db.DisableCompactions() db.DisableCompactions()
@ -6187,7 +6187,7 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
db := openTestDB(t, opts, nil) db := openTestDB(t, opts, nil)
db.DisableCompactions() db.DisableCompactions()
db.EnableOOONativeHistograms() db.EnableOOONativeHistograms()

View file

@ -839,7 +839,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
db := newTestDBWithOpts(t, opts) db := newTestDBWithOpts(t, opts)
app := db.Appender(context.Background()) app := db.Appender(context.Background())
@ -1010,7 +1010,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
db := newTestDBWithOpts(t, opts) db := newTestDBWithOpts(t, opts)
app := db.Appender(context.Background()) app := db.Appender(context.Background())
@ -1117,7 +1117,7 @@ func TestSortMetaByMinTimeAndMinRef(t *testing.T) {
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { t.Run("name="+tc.name, func(t *testing.T) {
slices.SortFunc(tc.inputMetas, lessByMinTimeAndMinRef) slices.SortFunc(tc.inputMetas, lessByMinTimeAndMinRef)
require.Equal(t, tc.expMetas, tc.inputMetas) require.Equal(t, tc.expMetas, tc.inputMetas)
}) })

View file

@ -82,7 +82,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string {
} }
if valueType.Kind() == reflect.Struct { if valueType.Kind() == reflect.Struct {
if _, found := valueType.FieldByName("slice"); found { if _, found := valueType.FieldByName("slice"); found {
name = fmt.Sprintf(`%s <code class="text-nowrap">...<code class="text-nowrap">`, name) name += " <code class=\"text-nowrap\">...<code class=\"text-nowrap\">"
} }
} }
@ -105,7 +105,7 @@ func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) er
} }
} }
return writeTable(writer, rows, fmt.Sprintf("%s Flags", strings.Repeat("#", level+2))) return writeTable(writer, rows, strings.Repeat("#", level+2)+" Flags")
} }
func createArgRow(arg *kingpin.ArgModel) []string { func createArgRow(arg *kingpin.ArgModel) []string {
@ -136,7 +136,7 @@ func writeArgTable(writer io.Writer, level int, agm *kingpin.ArgGroupModel) erro
rows = append(rows, row) rows = append(rows, row)
} }
return writeTable(writer, rows, fmt.Sprintf("%s Arguments", strings.Repeat("#", level+2))) return writeTable(writer, rows, strings.Repeat("#", level+2)+" Arguments")
} }
func createCmdRow(cmd *kingpin.CmdModel) []string { func createCmdRow(cmd *kingpin.CmdModel) []string {

View file

@ -3464,7 +3464,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
r.RemoteAddr = "127.0.0.1:20201" r.RemoteAddr = "127.0.0.1:20201"
return r, err return r, err
} }
r, err := http.NewRequest(m, fmt.Sprintf("http://example.com?%s", q.Encode()), nil) r, err := http.NewRequest(m, "http://example.com?"+q.Encode(), nil)
r.RemoteAddr = "127.0.0.1:20201" r.RemoteAddr = "127.0.0.1:20201"
return r, err return r, err
} }
@ -3773,7 +3773,7 @@ func TestAdminEndpoints(t *testing.T) {
} }
endpoint := tc.endpoint(api) endpoint := tc.endpoint(api)
req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil) req, err := http.NewRequest(tc.method, "?"+tc.values.Encode(), nil)
require.NoError(t, err) require.NoError(t, err)
res := setUnavailStatusOnTSDBNotReady(endpoint(req)) res := setUnavailStatusOnTSDBNotReady(endpoint(req))
@ -4149,7 +4149,7 @@ func TestTSDBStatus(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer} api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
endpoint := tc.endpoint(api) endpoint := tc.endpoint(api)
req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil) req, err := http.NewRequest(tc.method, "?"+tc.values.Encode(), nil)
require.NoError(t, err, "Error when creating test request") require.NoError(t, err, "Error when creating test request")
res := endpoint(req) res := endpoint(req)
assertAPIError(t, res.err, tc.errType) assertAPIError(t, res.err, tc.errType)
@ -4479,7 +4479,7 @@ func TestQueryTimeout(t *testing.T) {
"timeout": []string{"1s"}, "timeout": []string{"1s"},
} }
ctx := context.Background() ctx := context.Background()
req, err := http.NewRequest(tc.method, fmt.Sprintf("http://example.com?%s", query.Encode()), nil) req, err := http.NewRequest(tc.method, "http://example.com?"+query.Encode(), nil)
require.NoError(t, err) require.NoError(t, err)
req.RemoteAddr = "127.0.0.1:20201" req.RemoteAddr = "127.0.0.1:20201"