Enable perfsprint linter and fix up code

Signed-off-by: Oleksandr Redko <oleksandr.red+github@gmail.com>
Oleksandr Redko 2024-05-13 18:36:19 +03:00
parent 179163a4c6
commit f10c3454e9
48 changed files with 126 additions and 103 deletions
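
Most of the Go changes below follow one mechanical pattern: fmt.Sprint/fmt.Sprintf calls whose only job is to render a single integer or boolean are replaced with the matching strconv helper. A minimal, hypothetical sketch of the before/after pairs (illustrative only, not part of the commit; the variable names are made up):

package main

import (
    "fmt"
    "strconv"
)

func main() {
    i, id, ok := 7, int64(42), true

    fmt.Println(fmt.Sprintf("%d", i), strconv.Itoa(i))            // "%d" on int   -> strconv.Itoa
    fmt.Println(fmt.Sprint(i), strconv.Itoa(i))                   // fmt.Sprint    -> strconv.Itoa
    fmt.Println(fmt.Sprintf("%d", id), strconv.FormatInt(id, 10)) // "%d" on int64 -> strconv.FormatInt
    fmt.Println(fmt.Sprintf("%t", ok), strconv.FormatBool(ok))    // "%t" on bool  -> strconv.FormatBool
}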


@@ -21,6 +21,7 @@ linters:
     - goimports
     - misspell
     - nolintlint
+    - perfsprint
     - predeclared
     - revive
     - testifylint
@@ -44,7 +45,9 @@ issues:
     - linters:
         - godot
       source: "^// ==="
+    - linters:
+        - perfsprint
+      text: "fmt.Sprintf can be replaced with string addition"
 linters-settings:
   depguard:
     rules:
@@ -85,6 +88,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  perfsprint:
+    # Optimizes `fmt.Errorf`.
+    errorf: false
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly set in configuration all required rules.
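
The errorf option is deliberately left off above. When enabled, perfsprint's errorf check also rewrites fmt.Errorf calls that perform no formatting into errors.New. A hypothetical example of the rewrite this setting opts out of (not part of the commit):

package main

import (
    "errors"
    "fmt"
)

func main() {
    before := fmt.Errorf("something went wrong") // what the errorf check would flag
    after := errors.New("something went wrong")  // the replacement it would suggest
    fmt.Println(before, after)
}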


@@ -24,6 +24,7 @@ import (
     "os/exec"
     "path/filepath"
     "runtime"
+    "strconv"
     "strings"
     "syscall"
     "testing"
@@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {
     for i, tc := range testCases {
         tc := tc
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
                 require.NotEmpty(t, tc.in, "sender called with 0 alert")
                 require.Equal(t, tc.exp, alerts)


@@ -25,6 +25,7 @@ import (
     "os/exec"
     "path/filepath"
     "runtime"
+    "strconv"
     "strings"
     "syscall"
     "testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
     } {
         t.Run(c.file, func(t *testing.T) {
             for _, lintFatal := range []bool{true, false} {
-                t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+                t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
                     args := []string{"-test.main", "check", "config", "testdata/" + c.file}
                     if lintFatal {
                         args = append(args, "--lint-fatal")


@@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
     }
     avg := sum / len(datas)
     fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
-    maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
-    maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
-    maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+    maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
+    maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
+    maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
     for bucket, count := range buckets {
         percentage := 100.0 * count / total
         fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))


@@ -573,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
     }
     s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
     for i, l := range la[1:] {
-        s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
+        s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
     }
     s += "\n]"


@@ -18,6 +18,7 @@ import (
     "errors"
     "fmt"
     "net"
+    "strconv"
     "strings"
     "time"
@@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
         if inst.PrivateDnsName != nil {
             labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
         }
-        addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+        addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
         labels[model.AddressLabel] = model.LabelValue(addr)
         if inst.Platform != nil {


@@ -18,6 +18,7 @@ import (
     "errors"
     "fmt"
     "net"
+    "strconv"
     "strings"
     "time"
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
             lightsailLabelRegion: model.LabelValue(d.cfg.Region),
         }
-        addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+        addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
         labels[model.AddressLabel] = model.LabelValue(addr)
         if inst.PublicIpAddress != nil {


@@ -20,6 +20,7 @@ import (
     "math/rand"
     "net"
     "net/http"
+    "strconv"
     "strings"
     "sync"
     "time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
         }
         if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
             labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-            address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+            address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
             labels[model.AddressLabel] = model.LabelValue(address)
             return labels, nil
         }


@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
         // since the service may be registered remotely through a different node.
         var addr string
         if serviceNode.Service.Address != "" {
-            addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+            addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
         } else {
-            addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+            addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
         }
         labels := model.LabelSet{


@@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
         }
         labels := model.LabelSet{
-            doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
+            doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)),
             doLabelName: model.LabelValue(droplet.Name),
             doLabelImage: model.LabelValue(droplet.Image.Slug),
             doLabelImageName: model.LabelValue(droplet.Image.Name),


@@ -18,6 +18,7 @@ import (
     "errors"
     "fmt"
     "net"
+    "strconv"
     "strings"
     "sync"
     "time"
@@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
     tg := &targetgroup.Group{}
     hostPort := func(a string, p int) model.LabelValue {
-        return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
+        return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
     }
     for _, record := range response.Answer {
@@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
         switch addr := record.(type) {
         case *dns.SRV:
             dnsSrvRecordTarget = model.LabelValue(addr.Target)
-            dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
+            dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))
             // Remove the final dot from rooted DNS names to make them look more usual.
             addr.Target = strings.TrimRight(addr.Target, ".")


@@ -15,7 +15,6 @@ package hetzner
 import (
     "context"
-    "fmt"
     "net"
     "net/http"
     "strconv"
@@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
     for i, server := range servers {
         labels := model.LabelSet{
             hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
-            hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
+            hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)),
             hetznerLabelServerName: model.LabelValue(server.Name),
             hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
             hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
             hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
             hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
             hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
-            hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
+            hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
             hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
-            hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
-            hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
+            hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
+            hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
             model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
         }


@@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
             hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP),
             hetznerLabelServerStatus: model.LabelValue(server.Server.Status),
             hetznerLabelRobotProduct: model.LabelValue(server.Server.Product),
-            hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
+            hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
             model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
         }


@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
     var cfg discovery.StaticConfig
     for i, addr := range addrs {
         cfg = append(cfg, &targetgroup.Group{
-            Source: fmt.Sprint(i),
+            Source: strconv.Itoa(i),
             Targets: []model.LabelSet{
                 {model.AddressLabel: model.LabelValue(addr)},
             },


@@ -325,7 +325,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
         }
         labels := model.LabelSet{
-            linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)),
+            linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)),
             linodeLabelName: model.LabelValue(instance.Label),
             linodeLabelImage: model.LabelValue(instance.Image),
             linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@@ -338,13 +338,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
             linodeLabelType: model.LabelValue(instance.Type),
             linodeLabelStatus: model.LabelValue(instance.Status),
             linodeLabelGroup: model.LabelValue(instance.Group),
-            linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
+            linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
             linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
             linodeLabelBackups: model.LabelValue(backupsStatus),
-            linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
-            linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
-            linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
-            linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
+            linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
+            linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
+            linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
+            linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
         }
         addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))


@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
     var cfg StaticConfig
     for i, addr := range addrs {
         cfg = append(cfg, &targetgroup.Group{
-            Source: fmt.Sprint(i),
+            Source: strconv.Itoa(i),
             Targets: []model.LabelSet{
                 {model.AddressLabel: model.LabelValue(addr)},
             },


@@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
         host = task.Host
     }
-    return net.JoinHostPort(host, fmt.Sprintf("%d", port))
+    return net.JoinHostPort(host, strconv.Itoa(int(port)))
 }
 // Get a list of ports and a list of labels from a PortMapping.


@@ -15,7 +15,7 @@ package moby
 import (
     "context"
-    "fmt"
+    "strconv"
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/client"
@@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
         labelPrefix + labelNetworkID: network.ID,
         labelPrefix + labelNetworkName: network.Name,
         labelPrefix + labelNetworkScope: network.Scope,
-        labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
-        labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
+        labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
+        labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress),
     }
     for k, v := range network.Labels {
         ln := strutil.SanitizeLabelName(k)


@@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
             swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
         }
         if n.ManagerStatus != nil {
-            labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
+            labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
             labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
             labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
         }


@@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
                 labels[model.LabelName(k)] = model.LabelValue(v)
             }
-            addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+            addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
             labels[model.AddressLabel] = model.LabelValue(addr)
             tg.Targets = append(tg.Targets, labels)


@@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
                 labels[model.LabelName(k)] = model.LabelValue(v)
             }
-            addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
+            addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
             labels[model.AddressLabel] = model.LabelValue(addr)
             tg.Targets = append(tg.Targets, labels)


@@ -17,6 +17,7 @@ import (
     "context"
     "fmt"
     "net"
+    "strconv"
     "github.com/go-kit/log"
     "github.com/gophercloud/gophercloud"
@@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
     }
     tg := &targetgroup.Group{
-        Source: fmt.Sprintf("OS_" + h.region),
+        Source: "OS_" + h.region,
     }
     // OpenStack API reference
     // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
     }
     for _, hypervisor := range hypervisorList {
         labels := model.LabelSet{}
-        addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
+        addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
         labels[model.AddressLabel] = model.LabelValue(addr)
         labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
         labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)


@@ -17,6 +17,7 @@ import (
     "context"
     "fmt"
     "net"
+    "strconv"
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
@@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
     }
     pager := servers.List(client, opts)
     tg := &targetgroup.Group{
-        Source: fmt.Sprintf("OS_" + i.region),
+        Source: "OS_" + i.region,
     }
     err = pager.EachPage(func(page pagination.Page) (bool, error) {
         if ctx.Err() != nil {
@@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
             if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
                 lbls[openstackLabelPublicIP] = model.LabelValue(val)
             }
-            addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
+            addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
             lbls[model.AddressLabel] = model.LabelValue(addr)
             tg.Targets = append(tg.Targets, lbls)


@@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
             model.InstanceLabel: model.LabelValue(server.Name),
             dedicatedServerLabelPrefix + "state": model.LabelValue(server.State),
             dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
-            dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
+            dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)),
             dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack),
             dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)),
             dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os),
             dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel),
-            dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
+            dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
             dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse),
             dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter),
             dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name),


@@ -19,6 +19,7 @@ import (
     "net/netip"
     "net/url"
     "path"
+    "strconv"
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
@@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
             model.InstanceLabel: model.LabelValue(server.Name),
             vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer),
             vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
-            vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
-            vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
+            vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)),
+            vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
             vpsLabelPrefix + "version": model.LabelValue(server.Model.Version),
             vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name),
-            vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
-            vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
+            vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)),
+            vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)),
             vpsLabelPrefix + "zone": model.LabelValue(server.Zone),
             vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName),
             vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster),
             vpsLabelPrefix + "state": model.LabelValue(server.State),
             vpsLabelPrefix + "name": model.LabelValue(server.Name),
             vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode),
-            vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
+            vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)),
             vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType),
-            vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
+            vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)),
             vpsLabelPrefix + "ipv4": model.LabelValue(ipv4),
             vpsLabelPrefix + "ipv6": model.LabelValue(ipv6),
         }


@@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
             pdbLabelResource: model.LabelValue(resource.Resource),
             pdbLabelType: model.LabelValue(resource.Type),
             pdbLabelTitle: model.LabelValue(resource.Title),
-            pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
+            pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)),
             pdbLabelFile: model.LabelValue(resource.File),
             pdbLabelEnvironment: model.LabelValue(resource.Environment),
         }


@@ -20,6 +20,7 @@ import (
     "net/http"
     "net/url"
     "path"
+    "strconv"
     "strings"
     "time"
@@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
         model.AddressLabel: model.LabelValue(addr),
         uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
         uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
-        uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
+        uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
         uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
         uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
         uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),


@@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
     labels := model.LabelSet{}
     labels[serversetPathLabel] = model.LabelValue(path)
     labels[model.AddressLabel] = model.LabelValue(
-        net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
+        net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
     labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
-    labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
+    labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))
     for name, endpoint := range member.AdditionalEndpoints {
         cleanName := model.LabelName(strutil.SanitizeLabelName(name))
         labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
             endpoint.Host)
         labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
-            fmt.Sprintf("%d", endpoint.Port))
+            strconv.Itoa(endpoint.Port))
     }
     labels[serversetStatusLabel] = model.LabelValue(member.Status)
@@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
     labels := model.LabelSet{}
     labels[nervePathLabel] = model.LabelValue(path)
     labels[model.AddressLabel] = model.LabelValue(
-        net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
+        net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
     labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
-    labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
+    labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
     labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)
     return labels, nil


@@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
         // since the service may be registered remotely through a different node.
         var addr string
         if node.ServiceAddress != "" {
-            addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
+            addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
         } else {
-            addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
+            addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
         }
         target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}


@@ -14,9 +14,9 @@
 package histogram
 import (
-    "fmt"
     "math"
     "math/rand"
+    "strconv"
     "testing"
     "github.com/stretchr/testify/require"
@@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) {
     }
     for i, c := range cases {
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             var expBuckets, actBuckets []Bucket[float64]
             if c.includeNeg {
@@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
     }
     for i, c := range cases {
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             var expBuckets, actBuckets []Bucket[float64]
             if c.includePos {


@@ -14,8 +14,8 @@
 package histogram
 import (
-    "fmt"
     "math"
+    "strconv"
     "testing"
     "github.com/stretchr/testify/require"
@@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) {
     }
     for i, c := range cases {
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             actualString := c.histogram.String()
             require.Equal(t, c.expectedString, actualString)
         })
@@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) {
     }
     for i, c := range cases {
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             it := c.histogram.CumulativeBucketIterator()
             actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
             for it.Next() {
@@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) {
     }
     for i, c := range cases {
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             it := c.histogram.PositiveBucketIterator()
             actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
             for it.Next() {


@@ -17,6 +17,7 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
+    "strconv"
     "strings"
     "testing"
@@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) {
             want: FromStrings("ddd", "444"),
         },
     } {
-        t.Run(fmt.Sprint(i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             b := NewScratchBuilder(len(tcase.add))
             for _, lbl := range tcase.add {
                 b.Add(lbl.Name, lbl.Value)


@@ -14,7 +14,7 @@
 package relabel
 import (
-    "fmt"
+    "strconv"
     "testing"
     "github.com/prometheus/common/model"
@@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) {
         },
     }
     for i, test := range tests {
-        t.Run(fmt.Sprint(i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             err := test.config.Validate()
             if test.expected == "" {
                 require.NoError(t, err)


@@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) {
     for i := range make([]struct{}, 2*maxBatchSize+1) {
         h.queue = append(h.queue, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
         })
     }
@@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) {
     for i := range make([]struct{}, maxBatchSize) {
         h.queue = append(h.queue, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
         })
         expected = append(expected, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
         })
     }
@@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
     for i := range make([]struct{}, maxBatchSize/2) {
         h.queue = append(h.queue,
             &Alert{
-                Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+                Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
             },
             &Alert{
-                Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
+                Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
            },
         )
         expected1 = append(expected1,
             &Alert{
-                Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+                Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
             }, &Alert{
-                Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
+                Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
             },
         )
         expected2 = append(expected2, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
         })
     }
@@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) {
     var alerts []*Alert
     for i := range make([]struct{}, 20*maxBatchSize) {
         alerts = append(alerts, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
        })
     }
@@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) {
     var alerts []*Alert
     for i := range make([]struct{}, 20*maxBatchSize) {
         alerts = append(alerts, &Alert{
-            Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
+            Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
         })
     }


@@ -20,6 +20,7 @@ import (
     "math"
     "os"
     "sort"
+    "strconv"
     "testing"
     "time"
@@ -4406,7 +4407,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
         _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
         require.NoError(t, err)
         for idx1, h := range c.histograms {
-            lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
+            lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
             // Since we mutate h later, we need to create a copy here.
             var err error
             if floatHisto {
@@ -4678,7 +4679,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
         ts := idx0 * int64(10*time.Minute/time.Millisecond)
         app := storage.Appender(context.Background())
         for idx1, h := range c.histograms {
-            lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
+            lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
             // Since we mutate h later, we need to create a copy here.
             var err error
             if floatHisto {


@@ -19,6 +19,7 @@ import (
     "math"
     "os"
     "sort"
+    "strconv"
     "sync"
     "testing"
     "time"
@@ -1361,7 +1362,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
     ts := time.Now()
     app := db.Appender(context.Background())
     for i, h := range hists {
-        l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i))
+        l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i))
         _, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil)
         require.NoError(t, err)
     }


@@ -1285,7 +1285,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
             for i := 0; i < 500; i++ {
                 s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
             }
-            w.Write([]byte(fmt.Sprintf(s + "&")))
+            w.Write([]byte(s + "&"))
         } else {
             cancel()
         }


@@ -21,6 +21,7 @@ import (
     "net/http/httptest"
     "net/url"
     "os"
+    "strconv"
     "strings"
     "testing"
     "time"
@@ -67,7 +68,7 @@ func TestTargetOffset(t *testing.T) {
     // Calculate offsets for 10000 different targets.
     for i := range offsets {
         target := newTestTarget("example.com:80", 0, labels.FromStrings(
-            "label", fmt.Sprintf("%d", i),
+            "label", strconv.Itoa(i),
         ))
         offsets[i] = target.offset(interval, offsetSeed)
     }


@@ -468,7 +468,7 @@ func TestReleaseNoninternedString(t *testing.T) {
         m.StoreSeries([]record.RefSeries{
             {
                 Ref: chunks.HeadSeriesRef(i),
-                Labels: labels.FromStrings("asdf", fmt.Sprintf("%d", i)),
+                Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
             },
         }, 0)
         m.SeriesReset(1)


@@ -14,8 +14,8 @@
 package agent
 import (
-    "fmt"
     "math"
+    "strconv"
     "sync"
     "testing"
     "time"
@@ -53,7 +53,7 @@ func TestNoDeadlock(t *testing.T) {
         series := &memSeries{
             ref: chunks.HeadSeriesRef(i),
             lset: labels.FromMap(map[string]string{
-                "id": fmt.Sprintf("%d", i),
+                "id": strconv.Itoa(i),
             }),
         }
         stripeSeries.Set(series.lset.Hash(), series)


@@ -22,6 +22,7 @@ import (
     "os"
     "path"
     "path/filepath"
+    "strconv"
     "sync"
     "testing"
     "time"
@@ -1129,7 +1130,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
     for ln := 0; ln < labelNames; ln++ {
         app := h.Appender(context.Background())
         for lv := 0; lv < labelValues; lv++ {
-            app.Append(0, labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
+            app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0)
         }
         require.NoError(b, app.Commit())
     }
@@ -1161,7 +1162,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
     for ln := 0; ln < labelNames; ln++ {
         app := h.Appender(context.Background())
         for lv := 0; lv < labelValues; lv++ {
-            lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
+            lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
             _, err = app.Append(0, lbls, int64(totalSamples), 0)
             require.NoError(b, err)
             for ts := 0; ts < totalSamples; ts++ {


@@ -1065,7 +1065,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
     for i := int64(0); i < 155; i++ {
         app := db.Appender(context.Background())
-        ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64())
+        ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64())
         require.NoError(t, err)
         for j := int64(1); j <= 78; j++ {
             _, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())


@@ -416,7 +416,7 @@ func BenchmarkAddExemplar(b *testing.B) {
     exLabels := labels.FromStrings("trace_id", "89620921")
     for _, n := range []int{10000, 100000, 1000000} {
-        b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
+        b.Run(strconv.Itoa(n), func(b *testing.B) {
             for j := 0; j < b.N; j++ {
                 b.StopTimer()
                 exs, err := NewCircularExemplarStorage(int64(n), eMetrics)


@@ -3383,7 +3383,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
 func TestAppendHistogram(t *testing.T) {
     l := labels.FromStrings("a", "b")
     for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
-        t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) {
+        t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
             head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
             t.Cleanup(func() {
                 require.NoError(t, head.Close())
@@ -3692,7 +3692,7 @@ func TestChunkSnapshot(t *testing.T) {
             e := ex{
                 seriesLabels: lbls,
                 e: exemplar.Exemplar{
-                    Labels: labels.FromStrings("trace_id", fmt.Sprintf("%d", rand.Int())),
+                    Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
                     Value: rand.Float64(),
                     Ts: ts,
                 },
@@ -5032,7 +5032,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
         require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
     }
-    newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) }
+    newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }
     s1 := newLabels(1)
     appendSample(s1, 300) // At 300m.


@@ -50,7 +50,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
         for j := range l {
             l[j] = storage.SeriesRef(rand.Uint64())
         }
-        v := fmt.Sprintf("%d", i)
+        v := strconv.Itoa(i)
         p.m["a"][v] = l
     }
@@ -391,7 +391,7 @@ func BenchmarkMerge(t *testing.B) {
     its := make([]Postings, len(refs))
     for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} {
-        t.Run(fmt.Sprint(nSeries), func(bench *testing.B) {
+        t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
             ctx := context.Background()
             for i := 0; i < bench.N; i++ {
                 // Reset the ListPostings to their original values each time round the loop.


@@ -19,6 +19,7 @@ import (
     "os"
     "path/filepath"
     "sort"
+    "strconv"
     "strings"
     "testing"
@@ -232,10 +233,10 @@ func TestCheckpoint(t *testing.T) {
             // Write changing metadata for each series. In the end, only the latest
             // version should end up in the checkpoint.
             b = enc.Metadata([]record.RefMetadata{
-                {Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-                {Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-                {Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
-                {Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
+                {Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+                {Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+                {Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+                {Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
             }, nil)
             require.NoError(t, w.Log(b))
@@ -324,8 +325,8 @@ func TestCheckpoint(t *testing.T) {
     testutil.RequireEqual(t, expectedRefSeries, series)
     expectedRefMetadata := []record.RefMetadata{
-        {Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
-        {Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
+        {Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
+        {Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
         {Ref: 4, Unit: "unit", Help: "help"},
     }
     sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })


@@ -25,6 +25,7 @@ import (
     "reflect"
     "runtime"
     "sort"
+    "strconv"
     "strings"
     "testing"
     "time"
@@ -3544,7 +3545,7 @@ func TestTSDBStatus(t *testing.T) {
         },
     } {
         tc := tc
-        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+        t.Run(strconv.Itoa(i), func(t *testing.T) {
             api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
             endpoint := tc.endpoint(api)
             req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil)


@@ -22,6 +22,7 @@ import (
     "net/http"
     "net/http/httptest"
     "sort"
+    "strconv"
     "strings"
     "testing"
     "time"
@@ -341,8 +342,8 @@ func TestFederationWithNativeHistograms(t *testing.T) {
     }
     app := db.Appender(context.Background())
     for i := 0; i < 6; i++ {
-        l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i))
-        expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i))
+        l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
+        expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
         var err error
         switch i {
         case 0, 3: