Merge pull request #14091 from alexandear/enable-perfsprint-linter

Enable perfsprint linter and fix up code issues
Björn Rabenstein 2024-05-15 17:43:43 +02:00 committed by GitHub
commit 806073ad63
48 changed files with 126 additions and 103 deletions
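The change is mechanical throughout: wherever `fmt.Sprintf` or `fmt.Sprint` formats a single integer or boolean, it is replaced with the equivalent `strconv` call, which skips the reflection-based formatting path. A minimal before/after sketch of the pattern (standalone illustration, not a file from this diff):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 9090
	leader := true

	// Before: fmt.Sprintf boxes the value and parses a format string.
	oldAddr := net.JoinHostPort("10.0.0.1", fmt.Sprintf("%d", port))
	oldLeader := fmt.Sprintf("%t", leader)

	// After: strconv converts the value directly, as perfsprint suggests.
	newAddr := net.JoinHostPort("10.0.0.1", strconv.Itoa(port))
	newLeader := strconv.FormatBool(leader)

	fmt.Println(oldAddr == newAddr, oldLeader == newLeader) // true true
}
```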


@ -21,6 +21,7 @@ linters:
- goimports
- misspell
- nolintlint
- perfsprint
- predeclared
- revive
- testifylint
@ -44,7 +45,9 @@ issues:
- linters:
- godot
source: "^// ==="
- linters:
- perfsprint
text: "fmt.Sprintf can be replaced with string addition"
linters-settings:
depguard:
rules:
@ -85,6 +88,9 @@ linters-settings:
local-prefixes: github.com/prometheus/prometheus
gofumpt:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
errorf: false
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly set in configuration all required rules.
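Two perfsprint behaviours are deliberately relaxed in this configuration: `errorf: false` stops the linter from rewriting `fmt.Errorf`, and the `issues` exclusion above keeps the "can be replaced with string addition" suggestion from failing the run. For reference, the `fmt.Errorf` rewrite that `errorf: true` would ask for looks roughly like this (illustrative snippet, not part of the PR):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// With errorf enabled, perfsprint suggests errors.New for Errorf
	// calls that contain no formatting verbs.
	errBefore := fmt.Errorf("no targets found") // would be flagged
	errAfter := errors.New("no targets found")  // suggested replacement

	fmt.Println(errBefore, errAfter)
}
```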


@ -24,6 +24,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
require.NotEmpty(t, tc.in, "sender called with 0 alert")
require.Equal(t, tc.exp, alerts)


@ -25,6 +25,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
} {
t.Run(c.file, func(t *testing.T) {
for _, lintFatal := range []bool{true, false} {
t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal {
args = append(args, "--lint-fatal")


@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
}
avg := sum / len(datas)
fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
for bucket, count := range buckets {
percentage := 100.0 * count / total
fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
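The nested `strconv.Itoa` calls above are easy to misread: the inner call renders the largest value so its digit count can be measured with `len`, and the outer call turns that count back into a string that is spliced into the `Printf` verb as a field width. A small standalone sketch of the same trick:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	end := 12345

	// Digit count of the largest value that will be printed.
	width := strconv.Itoa(len(strconv.Itoa(end))) // "5"

	// Splice the width into the verb, e.g. "%5d", to right-align columns.
	format := "%" + width + "d\n"
	fmt.Printf(format, 42)  // "   42"
	fmt.Printf(format, end) // "12345"
}
```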


@ -573,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
}
s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
for i, l := range la[1:] {
s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
}
s += "\n]"


@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if inst.PrivateDnsName != nil {
labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.Platform != nil {


@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
lightsailLabelRegion: model.LabelValue(d.cfg.Region),
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.PublicIpAddress != nil {


@ -20,6 +20,7 @@ import (
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
}
if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(address)
return labels, nil
}


@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
// since the service may be registered remotely through a different node.
var addr string
if serviceNode.Service.Address != "" {
addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
} else {
addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
}
labels := model.LabelSet{


@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
}
labels := model.LabelSet{
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
doLabelImageName: model.LabelValue(droplet.Image.Name),


@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
tg := &targetgroup.Group{}
hostPort := func(a string, p int) model.LabelValue {
return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
}
for _, record := range response.Answer {
@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")


@ -15,7 +15,6 @@ package hetzner
import (
"context"
"fmt"
"net"
"net/http"
"strconv"
@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
for i, server := range servers {
labels := model.LabelSet{
hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)),
hetznerLabelServerName: model.LabelValue(server.Name),
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
}
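The hcloud hunk also shows why the replacement is not always `strconv.Itoa`: `Itoa` takes an `int`, so `int64` fields such as the server ID go through `strconv.FormatInt`, unsigned values such as the port go through `strconv.FormatUint`, and other widths need an explicit conversion first. A generic sketch of that mapping (field types here are illustrative, not the hcloud API's):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var (
		id    int64  = 4711 // 64-bit ID: FormatInt, base 10
		cores int    = 8    // plain int: Itoa is enough
		port  uint16 = 9100 // unsigned: widen to uint64 for FormatUint
	)

	fmt.Println(
		strconv.FormatInt(id, 10),
		strconv.Itoa(cores),
		strconv.FormatUint(uint64(port), 10),
	)
}
```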


@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP),
hetznerLabelServerStatus: model.LabelValue(server.Server.Status),
hetznerLabelRobotProduct: model.LabelValue(server.Server.Product),
hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
}


@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
var cfg discovery.StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},


@ -325,7 +325,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
}
labels := model.LabelSet{
linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)),
linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)),
linodeLabelName: model.LabelValue(instance.Label),
linodeLabelImage: model.LabelValue(instance.Image),
linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@ -338,13 +338,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
linodeLabelType: model.LabelValue(instance.Type),
linodeLabelStatus: model.LabelValue(instance.Status),
linodeLabelGroup: model.LabelValue(instance.Group),
linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
linodeLabelBackups: model.LabelValue(backupsStatus),
linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
}
addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))


@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
var cfg StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},


@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
host = task.Host
}
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
return net.JoinHostPort(host, strconv.Itoa(int(port)))
}
// Get a list of ports and a list of labels from a PortMapping.


@ -15,7 +15,7 @@ package moby
import (
"context"
"fmt"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
labelPrefix + labelNetworkID: network.ID,
labelPrefix + labelNetworkName: network.Name,
labelPrefix + labelNetworkScope: network.Scope,
labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress),
}
for k, v := range network.Labels {
ln := strutil.SanitizeLabelName(k)


@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
}
if n.ManagerStatus != nil {
labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
}


@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)


@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)


@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/gophercloud/gophercloud"
@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + h.region),
Source: "OS_" + h.region,
}
// OpenStack API reference
// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
for _, hypervisor := range hypervisorList {
labels := model.LabelSet{}
addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
labels[model.AddressLabel] = model.LabelValue(addr)
labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)
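The `Source` change in this hunk is more than a cosmetic cleanup: the old code passed a runtime-built string (`"OS_" + h.region`) to `fmt.Sprintf` as the format, so any `%` in the region name would be parsed as a verb. Plain concatenation always produces the intended string and skips format parsing entirely. A contrived illustration (the region value is made up):

```go
package main

import "fmt"

func main() {
	region := "us%east" // hypothetical region containing a '%'

	// Old form: the concatenated value is treated as a format string,
	// so "%e" becomes a verb with no matching operand.
	fmt.Println(fmt.Sprintf("OS_" + region)) // OS_us%!e(MISSING)ast

	// New form: plain concatenation, no format parsing.
	fmt.Println("OS_" + region) // OS_us%east
}
```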


@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
}
pager := servers.List(client, opts)
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + i.region),
Source: "OS_" + i.region,
}
err = pager.EachPage(func(page pagination.Page) (bool, error) {
if ctx.Err() != nil {
@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
lbls[openstackLabelPublicIP] = model.LabelValue(val)
}
addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
lbls[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, lbls)


@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
model.InstanceLabel: model.LabelValue(server.Name),
dedicatedServerLabelPrefix + "state": model.LabelValue(server.State),
dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)),
dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack),
dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)),
dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os),
dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse),
dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter),
dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name),


@ -19,6 +19,7 @@ import (
"net/netip"
"net/url"
"path"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
model.InstanceLabel: model.LabelValue(server.Name),
vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer),
vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "version": model.LabelValue(server.Model.Version),
vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name),
vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)),
vpsLabelPrefix + "zone": model.LabelValue(server.Zone),
vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName),
vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster),
vpsLabelPrefix + "state": model.LabelValue(server.State),
vpsLabelPrefix + "name": model.LabelValue(server.Name),
vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode),
vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)),
vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType),
vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)),
vpsLabelPrefix + "ipv4": model.LabelValue(ipv4),
vpsLabelPrefix + "ipv6": model.LabelValue(ipv6),
}


@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
pdbLabelResource: model.LabelValue(resource.Resource),
pdbLabelType: model.LabelValue(resource.Type),
pdbLabelTitle: model.LabelValue(resource.Title),
pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)),
pdbLabelFile: model.LabelValue(resource.File),
pdbLabelEnvironment: model.LabelValue(resource.Environment),
}


@ -20,6 +20,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
model.AddressLabel: model.LabelValue(addr),
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),


@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[serversetPathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))
for name, endpoint := range member.AdditionalEndpoints {
cleanName := model.LabelName(strutil.SanitizeLabelName(name))
labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
endpoint.Host)
labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
fmt.Sprintf("%d", endpoint.Port))
strconv.Itoa(endpoint.Port))
}
labels[serversetStatusLabel] = model.LabelValue(member.Status)
@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[nervePathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)
return labels, nil


@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
// since the service may be registered remotely through a different node.
var addr string
if node.ServiceAddress != "" {
addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
} else {
addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
}
target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}


@ -14,9 +14,9 @@
package histogram
import (
"fmt"
"math"
"math/rand"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64]
if c.includeNeg {
@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64]
if c.includePos {


@ -14,8 +14,8 @@
package histogram
import (
"fmt"
"math"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
actualString := c.histogram.String()
require.Equal(t, c.expectedString, actualString)
})
@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.CumulativeBucketIterator()
actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
for it.Next() {
@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.PositiveBucketIterator()
actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
for it.Next() {


@ -17,6 +17,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"testing"
@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) {
want: FromStrings("ddd", "444"),
},
} {
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
b := NewScratchBuilder(len(tcase.add))
for _, lbl := range tcase.add {
b.Add(lbl.Name, lbl.Value)


@ -14,7 +14,7 @@
package relabel
import (
"fmt"
"strconv"
"testing"
"github.com/prometheus/common/model"
@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) {
},
}
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
err := test.config.Validate()
if test.expected == "" {
require.NoError(t, err)


@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) {
for i := range make([]struct{}, 2*maxBatchSize+1) {
h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) {
for i := range make([]struct{}, maxBatchSize) {
h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
expected = append(expected, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
for i := range make([]struct{}, maxBatchSize/2) {
h.queue = append(h.queue,
&Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
},
&Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
},
)
expected1 = append(expected1,
&Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}, &Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
},
)
expected2 = append(expected2, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) {
var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) {
var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}


@ -20,6 +20,7 @@ import (
"math"
"os"
"sort"
"strconv"
"testing"
"time"
@ -4406,7 +4407,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
require.NoError(t, err)
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
@ -4678,7 +4679,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {


@ -19,6 +19,7 @@ import (
"math"
"os"
"sort"
"strconv"
"sync"
"testing"
"time"
@ -1361,7 +1362,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
ts := time.Now()
app := db.Appender(context.Background())
for i, h := range hists {
l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i))
l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i))
_, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil)
require.NoError(t, err)
}


@ -1285,7 +1285,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
for i := 0; i < 500; i++ {
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
}
w.Write([]byte(fmt.Sprintf(s + "&")))
w.Write([]byte(s + "&"))
} else {
cancel()
}


@ -21,6 +21,7 @@ import (
"net/http/httptest"
"net/url"
"os"
"strconv"
"strings"
"testing"
"time"
@ -67,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
// Calculate offsets for 10000 different targets.
for i := range offsets {
target := newTestTarget("example.com:80", 0, labels.FromStrings(
"label", fmt.Sprintf("%d", i),
"label", strconv.Itoa(i),
))
offsets[i] = target.offset(interval, offsetSeed)
}


@ -468,7 +468,7 @@ func TestReleaseNoninternedString(t *testing.T) {
m.StoreSeries([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(i),
Labels: labels.FromStrings("asdf", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
},
}, 0)
m.SeriesReset(1)


@ -14,8 +14,8 @@
package agent
import (
"fmt"
"math"
"strconv"
"sync"
"testing"
"time"
@ -53,7 +53,7 @@ func TestNoDeadlock(t *testing.T) {
series := &memSeries{
ref: chunks.HeadSeriesRef(i),
lset: labels.FromMap(map[string]string{
"id": fmt.Sprintf("%d", i),
"id": strconv.Itoa(i),
}),
}
stripeSeries.Set(series.lset.Hash(), series)


@ -22,6 +22,7 @@ import (
"os"
"path"
"path/filepath"
"strconv"
"sync"
"testing"
"time"
@ -1129,7 +1130,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
for ln := 0; ln < labelNames; ln++ {
app := h.Appender(context.Background())
for lv := 0; lv < labelValues; lv++ {
app.Append(0, labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
}
require.NoError(b, app.Commit())
}
@ -1161,7 +1162,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
for ln := 0; ln < labelNames; ln++ {
app := h.Appender(context.Background())
for lv := 0; lv < labelValues; lv++ {
lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
_, err = app.Append(0, lbls, int64(totalSamples), 0)
require.NoError(b, err)
for ts := 0; ts < totalSamples; ts++ {


@ -1065,7 +1065,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
for i := int64(0); i < 155; i++ {
app := db.Appender(context.Background())
ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64())
ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64())
require.NoError(t, err)
for j := int64(1); j <= 78; j++ {
_, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())


@ -416,7 +416,7 @@ func BenchmarkAddExemplar(b *testing.B) {
exLabels := labels.FromStrings("trace_id", "89620921")
for _, n := range []int{10000, 100000, 1000000} {
b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
b.Run(strconv.Itoa(n), func(b *testing.B) {
for j := 0; j < b.N; j++ {
b.StopTimer()
exs, err := NewCircularExemplarStorage(int64(n), eMetrics)


@ -3383,7 +3383,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
func TestAppendHistogram(t *testing.T) {
l := labels.FromStrings("a", "b")
for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) {
t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
@ -3692,7 +3692,7 @@ func TestChunkSnapshot(t *testing.T) {
e := ex{
seriesLabels: lbls,
e: exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", fmt.Sprintf("%d", rand.Int())),
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts,
},
@ -5032,7 +5032,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
}
newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) }
newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }
s1 := newLabels(1)
appendSample(s1, 300) // At 300m.


@ -50,7 +50,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
for j := range l {
l[j] = storage.SeriesRef(rand.Uint64())
}
v := fmt.Sprintf("%d", i)
v := strconv.Itoa(i)
p.m["a"][v] = l
}
@ -391,7 +391,7 @@ func BenchmarkMerge(t *testing.B) {
its := make([]Postings, len(refs))
for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} {
t.Run(fmt.Sprint(nSeries), func(bench *testing.B) {
t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
ctx := context.Background()
for i := 0; i < bench.N; i++ {
// Reset the ListPostings to their original values each time round the loop.


@ -19,6 +19,7 @@ import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
@ -232,10 +233,10 @@ func TestCheckpoint(t *testing.T) {
// Write changing metadata for each series. In the end, only the latest
// version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
}, nil)
require.NoError(t, w.Log(b))
@ -324,8 +325,8 @@ func TestCheckpoint(t *testing.T) {
testutil.RequireEqual(t, expectedRefSeries, series)
expectedRefMetadata := []record.RefMetadata{
{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 4, Unit: "unit", Help: "help"},
}
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })


@ -25,6 +25,7 @@ import (
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
@ -3544,7 +3545,7 @@ func TestTSDBStatus(t *testing.T) {
},
} {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
endpoint := tc.endpoint(api)
req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil)


@ -22,6 +22,7 @@ import (
"net/http"
"net/http/httptest"
"sort"
"strconv"
"strings"
"testing"
"time"
@ -341,8 +342,8 @@ func TestFederationWithNativeHistograms(t *testing.T) {
}
app := db.Appender(context.Background())
for i := 0; i < 6; i++ {
l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
var err error
switch i {
case 0, 3: