chore: enable unused-parameter from revive

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Matthieu MOREL 2025-02-10 08:06:58 +01:00
parent 86bb04783c
commit c7d4b53ec1
97 changed files with 350 additions and 351 deletions

@@ -155,7 +155,6 @@ linters-settings:
- name: unexported-return
- name: unreachable-code
- name: unused-parameter
disabled: true
- name: var-declaration
- name: var-naming
testifylint:
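
Note: removing "disabled: true" turns on revive's unused-parameter check, which flags any named function parameter the body never reads. The fix applied throughout the rest of this commit is mechanical: rename the parameter to the blank identifier, which keeps the signature unchanged (so interface implementations and callback types still match) while marking the value as intentionally ignored. A minimal sketch of the pattern, with hypothetical names not taken from this commit:

package example

import "net/http"

// Before: r is declared but never read, so revive reports a message along
// the lines of "parameter 'r' seems to be unused, consider removing or
// renaming it as _".
func healthzBefore(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

// After: the blank identifier silences the check without changing the
// signature, so the function still matches http.HandlerFunc.
func healthzAfter(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}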

@@ -154,7 +154,7 @@ func init() {
// serverOnlyFlag creates server-only kingpin flag.
func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)).
PreAction(func(parseContext *kingpin.ParseContext) error {
PreAction(func(_ *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user.
serverOnlyFlags = append(serverOnlyFlags, "--"+name)
return nil
@@ -164,7 +164,7 @@ func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagCl
// agentOnlyFlag creates agent-only kingpin flag.
func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)).
PreAction(func(parseContext *kingpin.ParseContext) error {
PreAction(func(_ *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user.
agentOnlyFlags = append(agentOnlyFlags, "--"+name)
return nil
@@ -526,7 +526,7 @@ func main() {
promslogflag.AddFlags(a, &cfg.promslogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(_ *kingpin.ParseContext) error {
if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {
os.Exit(1)
return err
@@ -1021,7 +1021,7 @@ func main() {
}
return nil
},
func(err error) {
func(_ error) {
close(cancel)
webHandler.SetReady(web.Stopping)
notifs.AddNotification(notifications.ShuttingDown)
@@ -1036,7 +1036,7 @@ func main() {
logger.Info("Scrape discovery manager stopped")
return err
},
func(err error) {
func(_ error) {
logger.Info("Stopping scrape discovery manager...")
cancelScrape()
},
@@ -1050,7 +1050,7 @@ func main() {
logger.Info("Notify discovery manager stopped")
return err
},
func(err error) {
func(_ error) {
logger.Info("Stopping notify discovery manager...")
cancelNotify()
},
@@ -1064,7 +1064,7 @@ func main() {
ruleManager.Run()
return nil
},
func(err error) {
func(_ error) {
ruleManager.Stop()
},
)
@@ -1083,7 +1083,7 @@ func main() {
logger.Info("Scrape manager stopped")
return err
},
func(err error) {
func(_ error) {
// Scrape manager needs to be stopped before closing the local TSDB
// so that it doesn't try to write samples to a closed storage.
// We should also wait for rule manager to be fully stopped to ensure
@@ -1101,7 +1101,7 @@ func main() {
tracingManager.Run()
return nil
},
func(err error) {
func(_ error) {
tracingManager.Stop()
},
)
@@ -1182,7 +1182,7 @@ func main() {
}
}
},
func(err error) {
func(_ error) {
// Wait for any in-progress reloads to complete to avoid
// reloading things after they have been shutdown.
cancel <- struct{}{}
@@ -1214,7 +1214,7 @@ func main() {
<-cancel
return nil
},
func(err error) {
func(_ error) {
close(cancel)
},
)
@@ -1267,7 +1267,7 @@ func main() {
<-cancel
return nil
},
func(err error) {
func(_ error) {
if err := fanoutStorage.Close(); err != nil {
logger.Error("Error stopping storage", "err", err)
}
@@ -1322,7 +1322,7 @@ func main() {
<-cancel
return nil
},
func(e error) {
func(_ error) {
if err := fanoutStorage.Close(); err != nil {
logger.Error("Error stopping storage", "err", err)
}
@@ -1339,7 +1339,7 @@ func main() {
}
return nil
},
func(err error) {
func(_ error) {
cancelWeb()
},
)
@@ -1361,7 +1361,7 @@ func main() {
logger.Info("Notifier manager stopped")
return nil
},
func(err error) {
func(_ error) {
notifierManager.Stop()
},
)
@@ -1638,29 +1638,29 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
type notReadyAppender struct{}
// SetOptions does nothing in this appender implementation.
func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {}
func (n notReadyAppender) SetOptions(_ *storage.AppendOptions) {}
func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
func (n notReadyAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
func (n notReadyAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (n notReadyAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (n notReadyAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
func (n notReadyAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
func (n notReadyAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
return 0, tsdb.ErrNotReady
}
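
The notReadyAppender stubs above can blank every parameter because Go matches a method to an interface by its parameter and result types only; names, including _, play no part. A reduced sketch of the idea (hypothetical interface, not the real storage.Appender contract):

package example

import "errors"

var errNotReady = errors.New("storage not ready")

type appender interface {
	Append(ref uint64, v float64) (uint64, error)
}

// notReady ignores all of its inputs, so every parameter is blanked; the
// method still satisfies appender because only the types are compared.
type notReady struct{}

func (notReady) Append(_ uint64, _ float64) (uint64, error) {
	return 0, errNotReady
}

var _ appender = notReady{} // compile-time check that the stub conforms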

@@ -171,7 +171,7 @@ func reloadPrometheus(t *testing.T, port int) {
// startGarbageServer sets up a mock server that returns a 500 Internal Server Error
// for all requests. It also increments the request count each time it's hit.
func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
requestCount.Inc()
w.WriteHeader(http.StatusInternalServerError)
}))

@@ -45,7 +45,7 @@ func sortSamples(samples []backfillSample) {
})
}
func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
func queryAllSeries(t testing.TB, q storage.Querier, _, _ int64) []backfillSample {
ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
samples := []backfillSample{}
for ss.Next() {

@@ -1324,7 +1324,7 @@ func labelsSetPromQL(query, labelMatchType, name, value string) error {
return fmt.Errorf("invalid label match type: %s", labelMatchType)
}
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
if n, ok := node.(*parser.VectorSelector); ok {
var found bool
for i, l := range n.LabelMatchers {
@@ -1355,7 +1355,7 @@ func labelsDeletePromQL(query, name string) error {
return err
}
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
if n, ok := node.(*parser.VectorSelector); ok {
for i, l := range n.LabelMatchers {
if l.Name == name {

@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
samples model.Matrix
}
func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, _ string, _ v1.Range, _ ...v1.Option) (model.Value, v1.Warnings, error) {
return mockAPI.samples, v1.Warnings{}, nil
}

@@ -38,7 +38,7 @@ type sdCheckResult struct {
}
// CheckSD performs service discovery for the given job name and reports the results.
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, _ prometheus.Registerer) int {
logger := promslog.New(&promslog.Config{})
cfg, err := config.LoadFile(sdConfigFiles, false, logger)

@@ -224,7 +224,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
QueryFunc: rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
Appendable: suite.Storage(),
Context: context.Background(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
NotifyFunc: func(_ context.Context, _ string, _ ...*rules.Alert) {},
Logger: promslog.NewNopLogger(),
}
m := rules.NewManager(opts)

@@ -101,7 +101,7 @@ type EC2SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*EC2SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &ec2Metrics{
refreshMetrics: rmi,
}
@@ -262,7 +262,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
}
input := &ec2.DescribeInstancesInput{Filters: filters}
if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
if err := ec2Client.DescribeInstancesPagesWithContext(ctx, input, func(p *ec2.DescribeInstancesOutput, _ bool) bool {
for _, r := range p.Reservations {
for _, inst := range r.Instances {
if inst.PrivateIpAddress == nil {

@@ -399,7 +399,7 @@ func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client {
return &client
}
func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(_ aws.Context, _ *ec2.DescribeAvailabilityZonesInput, _ ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
if len(m.ec2Data.azToAZID) == 0 {
return nil, errors.New("No AZs found")
}
@@ -420,7 +420,7 @@ func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, in
}, nil
}
func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error {
func (m *mockEC2Client) DescribeInstancesPagesWithContext(_ aws.Context, _ *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, _ ...request.Option) error {
r := ec2.Reservation{}
r.SetInstances(m.ec2Data.instances)
r.SetOwnerId(m.ec2Data.ownerID)

@@ -83,7 +83,7 @@ type LightsailSDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*LightsailSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &lightsailMetrics{
refreshMetrics: rmi,
}

@@ -723,11 +723,11 @@ func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClie
func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer {
return fakenetwork.InterfacesServer{
Get: func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
Get: func(_ context.Context, _, _ string, _ *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil)
return
},
GetVirtualMachineScaleSetNetworkInterface: func(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) {
GetVirtualMachineScaleSetNetworkInterface: func(_ context.Context, _, _, _, _ string, _ *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) {
resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil)
return
},
@@ -736,7 +736,7 @@ func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.
func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer {
return fake.VirtualMachinesServer{
NewListAllPager: func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) {
NewListAllPager: func(_ *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) {
for _, page := range vmResp {
resp.AddPage(http.StatusOK, page, nil)
}
@@ -747,7 +747,7 @@ func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllRespons
func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer {
return fake.VirtualMachineScaleSetsServer{
NewListAllPager: func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) {
NewListAllPager: func(_ *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) {
for _, page := range vmssResp {
resp.AddPage(http.StatusOK, page, nil)
}
@@ -758,7 +758,7 @@ func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientLi
func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer {
return fake.VirtualMachineScaleSetVMsServer{
NewListPager: func(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) {
NewListPager: func(_, _ string, _ *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) {
for _, page := range vmssvmResp {
resp.AddPage(http.StatusOK, page, nil)
}

@@ -399,14 +399,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
}{
{
// Define a handler that will return status 500.
handler: func(w http.ResponseWriter, r *http.Request) {
handler: func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
},
errMessage: "Unexpected response code: 500 ()",
},
{
// Define a handler that will return incorrect response.
handler: func(w http.ResponseWriter, r *http.Request) {
handler: func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte(`{"Config": {"Not-Datacenter": "test-dc"}}`))
},
errMessage: "invalid value '<nil>' for Config.Datacenter",

@@ -31,7 +31,7 @@ type consulMetrics struct {
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
m := &consulMetrics{
rpcFailuresCount: prometheus.NewCounter(
prometheus.CounterOpts{

@@ -65,7 +65,7 @@ func init() {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &digitaloceanMetrics{
refreshMetrics: rmi,
}

@@ -52,7 +52,7 @@ func TestDNS(t *testing.T) {
Port: 80,
Type: "A",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return nil, errors.New("some error")
},
expected: []*targetgroup.Group{},
@@ -65,7 +65,7 @@ func TestDNS(t *testing.T) {
Port: 80,
Type: "A",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{
Answer: []dns.RR{
&dns.A{A: net.IPv4(192, 0, 2, 2)},
@@ -97,7 +97,7 @@ func TestDNS(t *testing.T) {
Port: 80,
Type: "AAAA",
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{
Answer: []dns.RR{
&dns.AAAA{AAAA: net.IPv6loopback},
@@ -128,7 +128,7 @@ func TestDNS(t *testing.T) {
Type: "SRV",
RefreshInterval: model.Duration(time.Minute),
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{
Answer: []dns.RR{
&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -167,7 +167,7 @@ func TestDNS(t *testing.T) {
Names: []string{"_mysql._tcp.db.example.com."},
RefreshInterval: model.Duration(time.Minute),
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{
Answer: []dns.RR{
&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -198,7 +198,7 @@ func TestDNS(t *testing.T) {
Names: []string{"_mysql._tcp.db.example.com."},
RefreshInterval: model.Duration(time.Minute),
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{}, nil
},
expected: []*targetgroup.Group{
@@ -215,7 +215,7 @@ func TestDNS(t *testing.T) {
Port: 25,
RefreshInterval: model.Duration(time.Minute),
},
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
lookup: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{
Answer: []dns.RR{
&dns.MX{Preference: 0, Mx: "smtp1.example.com."},

@@ -172,7 +172,7 @@ func TestFetchApps(t *testing.T) {
</applications>`
// Simulate apps with a valid XML response.
respHandler := func(w http.ResponseWriter, r *http.Request) {
respHandler := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, appsXML)
@@ -199,7 +199,7 @@ func TestFetchApps(t *testing.T) {
func Test500ErrorHttpResponse(t *testing.T) {
// Simulate 500 error.
respHandler := func(w http.ResponseWriter, r *http.Request) {
respHandler := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, ``)

@@ -77,7 +77,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &eurekaMetrics{
refreshMetrics: rmi,
}

@@ -58,7 +58,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
func TestEurekaSDHandleError(t *testing.T) {
var (
errTesting = "non 2xx status '500' response during eureka service discovery"
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, ``)
@@ -76,7 +76,7 @@ func TestEurekaSDEmptyList(t *testing.T) {
<versions__delta>1</versions__delta>
<apps__hashcode/>
</applications>`
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, appsXML)
@@ -235,7 +235,7 @@ func TestEurekaSDSendGroup(t *testing.T) {
</instance>
</application>
</applications>`
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, appsXML)

@@ -30,7 +30,7 @@ type fileMetrics struct {
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
fm := &fileMetrics{
fileSDReadErrorsCount: prometheus.NewCounter(
prometheus.CounterOpts{

@@ -83,7 +83,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &gceMetrics{
refreshMetrics: rmi,
}

@@ -64,7 +64,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &hetznerMetrics{
refreshMetrics: rmi,
}

@@ -75,7 +75,7 @@ func TestHTTPValidRefresh(t *testing.T) {
}
func TestHTTPInvalidCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusBadRequest)
}))
@@ -104,7 +104,7 @@ func TestHTTPInvalidCode(t *testing.T) {
}
func TestHTTPInvalidFormat(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprintln(w, "{}")
}))
@@ -212,7 +212,7 @@ func TestContentTypeRegex(t *testing.T) {
func TestSourceDisappeared(t *testing.T) {
var stubResponse string
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, stubResponse)
}))

@@ -89,7 +89,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &ionosMetrics{
refreshMetrics: rmi,
}

@@ -312,7 +312,7 @@ func TestFailuresCountMetric(t *testing.T) {
require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount))
// Simulate an error on watch requests.
c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) {
c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(_ kubetesting.Action) (bool, watch.Interface, error) {
return true, nil, apierrors.NewUnauthorized("unauthorized")
})

@@ -28,7 +28,7 @@ type kubernetesMetrics struct {
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
m := &kubernetesMetrics{
eventCount: prometheus.NewCounterVec(
prometheus.CounterOpts{

@@ -80,7 +80,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &marathonMetrics{
refreshMetrics: rmi,
}

@@ -243,7 +243,7 @@ func TestMarathonZeroTaskPorts(t *testing.T) {
func Test500ErrorHttpResponseWithValidJSONBody(t *testing.T) {
// Simulate 500 error with a valid JSON response.
respHandler := func(w http.ResponseWriter, r *http.Request) {
respHandler := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/json")
io.WriteString(w, `{}`)

@@ -70,7 +70,7 @@ type Filter struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*DockerSwarmSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &dockerswarmMetrics{
refreshMetrics: rmi,
}

@@ -76,7 +76,7 @@ func (s *NomadSDTestSuite) SetupTest(t *testing.T) {
}
func (m *SDMock) HandleServicesList() {
m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, r *http.Request) {
m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
@@ -99,7 +99,7 @@ func (m *SDMock) HandleServicesList() {
}
func (m *SDMock) HandleServiceHashiCupsGet() {
m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, r *http.Request) {
m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)

@@ -77,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
// OpenStack API reference
// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
pagerHypervisors := hypervisors.List(client, nil)
err = pagerHypervisors.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
err = pagerHypervisors.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) {
hypervisorList, err := hypervisors.ExtractHypervisors(page)
if err != nil {
return false, fmt.Errorf("could not extract hypervisors: %w", err)

@@ -119,7 +119,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{})
floatingIPList := make(map[floatingIPKey]string)
floatingIPPresent := make(map[string]struct{})
err = pagerFIP.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
err = pagerFIP.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) {
result, err := floatingips.ExtractFloatingIPs(page)
if err != nil {
return false, fmt.Errorf("could not extract floatingips: %w", err)

@@ -62,7 +62,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) {
// HandleVersionsSuccessfully mocks version call.
func (m *SDMock) HandleVersionsSuccessfully() {
m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
m.Mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprintf(w, `
{
"versions": {
@@ -90,7 +90,7 @@ func (m *SDMock) HandleVersionsSuccessfully() {
// HandleAuthSuccessfully mocks auth call.
func (m *SDMock) HandleAuthSuccessfully() {
m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("X-Subject-Token", tokenID)
w.WriteHeader(http.StatusCreated)

@@ -67,7 +67,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &openstackMetrics{
refreshMetrics: rmi,
}

@@ -54,7 +54,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &ovhcloudMetrics{
refreshMetrics: rmi,
}

@@ -82,7 +82,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &puppetdbMetrics{
refreshMetrics: rmi,
}

@@ -184,7 +184,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
}
func TestPuppetDBInvalidCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusBadRequest)
}))
@@ -212,7 +212,7 @@ func TestPuppetDBInvalidCode(t *testing.T) {
}
func TestPuppetDBInvalidFormat(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprintln(w, "{}")
}))

@@ -56,7 +56,7 @@ func TestRefresh(t *testing.T) {
}
var i int
refresh := func(ctx context.Context) ([]*targetgroup.Group, error) {
refresh := func(_ context.Context) ([]*targetgroup.Group, error) {
i++
switch i {
case 1:

@@ -105,7 +105,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &scalewayMetrics{
refreshMetrics: rmi,
}

@@ -71,7 +71,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &tritonMetrics{
refreshMetrics: rmi,
}

@@ -230,7 +230,7 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet {
var (
td, m, _ = newTritonDiscovery(c)
s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
s = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprintln(w, dstr)
}))
)

@@ -113,7 +113,7 @@ type Discovery struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &uyuniMetrics{
refreshMetrics: rmi,
}

@@ -59,7 +59,7 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
func TestUyuniSDHandleError(t *testing.T) {
var (
errTesting = "unable to login to Uyuni API: request error: bad status code - 500"
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, ``)
@@ -75,7 +75,7 @@ func TestUyuniSDLogin(t *testing.T) {
var (
errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500"
call = 0
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/xml")
switch call {
case 0:
@@ -106,7 +106,7 @@ func TestUyuniSDSkipLogin(t *testing.T) {
func TestUyuniSDSkipLogin(t *testing.T) {
var (
errTesting = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500"
respHandler = func(w http.ResponseWriter, r *http.Request) {
respHandler = func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, ``)

@@ -76,7 +76,7 @@ type SDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &vultrMetrics{
refreshMetrics: rmi,
}

@@ -106,7 +106,7 @@ func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig,
}
func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) {
client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
return nil, nil
})
defer cleanup()
@@ -146,7 +146,7 @@ func TestHTTPResourceClientFetchFullResponse(t *testing.T) {
}
func TestHTTPResourceClientServerError(t *testing.T) {
client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(_ *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
return nil, errors.New("server error")
})
defer cleanup()

@@ -29,7 +29,7 @@ type xdsMetrics struct {
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
m := &xdsMetrics{
fetchFailuresCount: prometheus.NewCounter(
prometheus.CounterOpts{

@@ -85,7 +85,7 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest.
}
func constantResourceParser(targets []model.LabelSet, err error) resourceParser {
return func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) {
return func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) {
return targets, err
}
}
@@ -120,7 +120,7 @@ func (rc testResourceClient) Close() {
func TestPollingRefreshSkipUpdate(t *testing.T) {
rc := &testResourceClient{
fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) {
fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) {
return nil, nil
},
}
@@ -167,7 +167,7 @@ func TestPollingRefreshAttachesGroupMetadata(t *testing.T) {
rc := &testResourceClient{
server: server,
protocolVersion: ProtocolV3,
fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) {
fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) {
return &v3.DiscoveryResponse{}, nil
},
}
@@ -223,14 +223,14 @@ func TestPollingDisappearingTargets(t *testing.T) {
rc := &testResourceClient{
server: server,
protocolVersion: ProtocolV3,
fetch: func(ctx context.Context) (*v3.DiscoveryResponse, error) {
fetch: func(_ context.Context) (*v3.DiscoveryResponse, error) {
return &v3.DiscoveryResponse{}, nil
},
}
// On the first poll, send back two targets. On the next, send just one.
counter := 0
parser := func(resources []*anypb.Any, typeUrl string) ([]model.LabelSet, error) {
parser := func(_ []*anypb.Any, _ string) ([]model.LabelSet, error) {
counter++
if counter == 1 {
return []model.LabelSet{

@@ -59,7 +59,7 @@ type ServersetSDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*ServersetSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &discovery.NoopDiscovererMetrics{}
}
@@ -101,7 +101,7 @@ type NerveSDConfig struct {
}
// NewDiscovererMetrics implements discovery.Config.
func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
func (*NerveSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &discovery.NoopDiscovererMetrics{}
}

@@ -31,6 +31,6 @@ func TestNewDiscoveryError(t *testing.T) {
[]string{"unreachable.test"},
time.Second, []string{"/"},
nil,
func(data []byte, path string) (model.LabelSet, error) { return nil, nil })
func(_ []byte, _ string) (model.LabelSet, error) { return nil, nil })
require.Error(t, err)
}

@@ -72,7 +72,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
`
server := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(_ http.ResponseWriter, r *http.Request) {
require.Equal(t, http.MethodPost, r.Method, "Unexpected method.")
require.Equal(t, "/api/v2/write", r.URL.Path, "Unexpected path.")
b, err := io.ReadAll(r.Body)

@@ -991,7 +991,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str
return true
}
analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
return false
@@ -1026,7 +1026,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str
findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
multiMatcher.add(matcher.s)
return true
}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
}, func(prefix string, _ bool, matcher StringMatcher) bool {
multiMatcher.addPrefix(prefix, caseSensitive, matcher)
return true
})

@@ -987,7 +987,7 @@ func TestFindEqualOrPrefixStringMatchers(t *testing.T) {
ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
matches = append(matches, match{matcher.s, matcher.caseSensitive})
return true
}, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool {
}, func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool {
matches = append(matches, match{prefix, prefixCaseSensitive})
return true
})

@@ -604,14 +604,14 @@ func TestNHCBParser_NoNHCBWhenExponential(t *testing.T) {
return "ProtoBuf", factory, []int{1, 2, 3}, parserOptions{useUTF8sep: true, hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
factory := func(_ bool) Parser {
input := createTestOpenMetricsHistogram()
return NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
}
return "OpenMetrics", factory, []int{1}, parserOptions{hasCreatedTimeStamp: true}
},
func() (string, parserFactory, []int, parserOptions) {
factory := func(keepClassic bool) Parser {
factory := func(_ bool) Parser {
input := createTestPromHistogram()
return NewPromParser([]byte(input), labels.NewSymbolTable())
}

@@ -348,7 +348,7 @@ func TestCustomDo(t *testing.T) {
var received bool
h := NewManager(&Options{
Do: func(_ context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
Do: func(_ context.Context, _ *http.Client, req *http.Request) (*http.Response, error) {
received = true
body, err := io.ReadAll(req.Body)
@@ -447,7 +447,7 @@ func TestHandlerQueuing(t *testing.T) {
errc = make(chan error, 1)
)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
// Notify the test function that we have received something.
select {
case called <- struct{}{}:
@@ -724,7 +724,7 @@ func TestHangingNotifier(t *testing.T) {
// Set up a faulty Alertmanager.
var faultyCalled atomic.Bool
faultyServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
faultyServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
faultyCalled.Store(true)
select {
case <-done:
@@ -736,7 +736,7 @@ func TestHangingNotifier(t *testing.T) {
// Set up a functional Alertmanager.
var functionalCalled atomic.Bool
functionalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
functionalServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
functionalCalled.Store(true)
}))
functionalURL, err := url.Parse(functionalServer.URL)

@@ -558,7 +558,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
var atModifierUsed, negativeOffsetUsed bool
var validationErr error
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
switch n := node.(type) {
case *parser.VectorSelector:
if n.Timestamp != nil || n.StartOrEnd == parser.START || n.StartOrEnd == parser.END {
@@ -1969,7 +1969,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
case *parser.NumberLiteral:
span.SetAttributes(attribute.Float64("value", e.Val))
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil
})
@@ -2138,7 +2138,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1)
}
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.rangeEval(ctx, nil, func(_ []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
if vs.Timestamp != nil {
// This is a special case for "timestamp()" when the @ modifier is used, to ensure that
// we return a point for each time step in this case.
@@ -3789,7 +3789,7 @@ func NewHashRatioSampler() *HashRatioSampler {
return &HashRatioSampler{}
}
func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 {
func (s *HashRatioSampler) sampleOffset(_ int64, sample *Sample) float64 {
const (
float64MaxUint64 = float64(math.MaxUint64)
)

@@ -263,7 +263,7 @@ func TestQueryError(t *testing.T) {
}
engine := promqltest.NewTestEngineWithOpts(t, opts)
errStorage := promql.ErrStorage{errors.New("storage error")}
queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
queryable := storage.QueryableFunc(func(_, _ int64) (storage.Querier, error) {
return &errQuerier{err: errStorage}, nil
})
ctx, cancelCtx := context.WithCancel(context.Background())
@@ -2290,7 +2290,7 @@ func TestQueryLogger_error(t *testing.T) {
ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"})
defer cancelCtx()
testErr := errors.New("failure")
query := engine.NewTestQuery(func(ctx context.Context) error {
query := engine.NewTestQuery(func(_ context.Context) error {
return testErr
})

@@ -59,7 +59,7 @@ import (
type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)
// === time() float64 ===
func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return Vector{Sample{
F: float64(enh.Ts) / 1000,
}}, nil
@@ -498,7 +498,7 @@ func filterFloats(v Vector) Vector {
}
// === sort(node parser.ValueTypeVector) (Vector, Annotations) ===
func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSort(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
// NaN should sort to the bottom, so take descending sort with NaN first and
// reverse it.
byValueSorter := vectorByReverseValueHeap(filterFloats(vals[0].(Vector)))
@@ -507,7 +507,7 @@ func funcSort(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
}
// === sortDesc(node parser.ValueTypeVector) (Vector, Annotations) ===
func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSortDesc(vals []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
// NaN should sort to the bottom, so take ascending sort with NaN first and
// reverse it.
byValueSorter := vectorByValueHeap(filterFloats(vals[0].(Vector)))
@@ -516,7 +516,7 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
}
// === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSortByLabel(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
lbls := stringSliceFromArgs(args[1:])
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
for _, label := range lbls {
@@ -542,7 +542,7 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode
}
// === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) ===
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
lbls := stringSliceFromArgs(args[1:])
slices.SortFunc(vals[0].(Vector), func(a, b Sample) int {
for _, label := range lbls {
@@ -589,7 +589,7 @@ func clamp(vec Vector, minVal, maxVal float64, enh *EvalNodeHelper) (Vector, ann
}
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcClamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
minVal := vals[1].(Vector)[0].F
maxVal := vals[2].(Vector)[0].F
@@ -597,14 +597,14 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
}
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcClampMax(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
maxVal := vals[1].(Vector)[0].F
return clamp(vec, math.Inf(-1), maxVal, enh)
}
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
minVal := vals[1].(Vector)[0].F
return clamp(vec, minVal, math.Inf(+1), enh)
@@ -641,7 +641,7 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
}
// === Scalar(node parser.ValueTypeVector) Scalar ===
func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcScalar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
var (
v = vals[0].(Vector)
value float64
@@ -766,14 +766,14 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
}
// === count_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcCountOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return aggrOverTime(vals, enh, func(s Series) float64 {
return float64(len(s.Floats) + len(s.Histograms))
}), nil
}
// === last_over_time(Matrix parser.ValueTypeMatrix) (Vector, Notes) ===
func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcLastOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
el := vals[0].(Matrix)[0]
var f FPoint
@@ -998,13 +998,13 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
// This function will return 1 if the matrix has at least one element.
// Due to engine optimization, this function is only called when this condition is true.
// Then, the engine post-processes the results to get the expected output.
func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAbsentOverTime(_ []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out, Sample{F: 1}), nil
}
// === present_over_time(Vector parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return aggrOverTime(vals, enh, func(s Series) float64 {
func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return aggrOverTime(vals, enh, func(_ Series) float64 {
return 1
}), nil
}
@@ -1026,126 +1026,126 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6
}
// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Abs), nil
}
// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCeil(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Ceil), nil
}
// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcFloor(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Floor), nil
}
// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcExp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Exp), nil
}
// === sqrt(Vector VectorNode) (Vector, Annotations) ===
func funcSqrt(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sqrt), nil
}
// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log), nil
}
// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog2(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log2), nil
}
// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Log10), nil
}
// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sin), nil
}
// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Cos), nil
}
// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Tan), nil
}
// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asin), nil
}
// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acos), nil
}
// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atan), nil
}
// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sinh), nil
}
// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Cosh), nil
}
// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Tanh), nil
}
// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asinh), nil
}
// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acosh), nil
}
// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atanh), nil
}
// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
return v * math.Pi / 180
}), nil
}
// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
return v * 180 / math.Pi
}), nil
}
// === pi() Scalar ===
func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, annotations.Annotations) {
return Vector{Sample{F: math.Pi}}, nil
}
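All of the single-argument math wrappers above delegate to a shared helper that maps a float64 function over every sample in the input Vector. A standalone sketch of that shape, with simplified stand-ins for the real promql types (assumed for illustration, not the actual simpleFunc):

package main

import (
	"fmt"
	"math"
)

// sample and vector are simplified stand-ins for promql.Sample/Vector.
type sample struct{ F float64 }
type vector []sample

// simpleFunc applies f to every sample value and returns the result,
// mirroring the shape of the helper the wrappers above call.
func simpleFunc(in vector, f func(float64) float64) vector {
	out := make(vector, 0, len(in))
	for _, s := range in {
		out = append(out, sample{F: f(s.F)})
	}
	return out
}

func main() {
	v := vector{{F: 0.25}, {F: 4}}
	fmt.Println(simpleFunc(v, math.Sqrt)) // [{0.5} {2}]
}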
// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, func(v float64) float64 {
switch {
case v < 0:
@ -1159,7 +1159,7 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
}
// === timestamp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcTimestamp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
vec := vals[0].(Vector)
for _, el := range vec {
if !enh.enableDelayedNameRemoval {
@ -1284,7 +1284,7 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo
}
// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
@ -1305,7 +1305,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
}
// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
@ -1326,7 +1326,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
}
// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
@ -1347,7 +1347,7 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod
}
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramStdDev(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
@ -1390,7 +1390,7 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
}
// === histogram_stdvar(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramStdVar(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
inVec := vals[0].(Vector)
for _, sample := range inVec {
@ -1433,7 +1433,7 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
}
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHistogramFraction(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
lower := vals[0].(Vector)[0].F
upper := vals[1].(Vector)[0].F
inVec := vals[2].(Vector)
@ -1550,7 +1550,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
}
// === resets(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcResets(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := vals[0].(Matrix)[0].Floats
histograms := vals[0].(Matrix)[0].Histograms
resets := 0
@ -1595,7 +1595,7 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
}
// === changes(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcChanges(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
floats := vals[0].(Matrix)[0].Floats
histograms := vals[0].(Matrix)[0].Histograms
changes := 0
@ -1683,7 +1683,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
}
// === Vector(s Scalar) (Vector, Annotations) ===
func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcVector(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out,
Sample{
Metric: labels.Labels{},
@ -1765,56 +1765,56 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
}
// === days_in_month(v Vector) Scalar ===
func funcDaysInMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDaysInMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day())
}), nil
}
// === day_of_month(v Vector) Scalar ===
func funcDayOfMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDayOfMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Day())
}), nil
}
// === day_of_week(v Vector) Scalar ===
func funcDayOfWeek(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDayOfWeek(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Weekday())
}), nil
}
// === day_of_year(v Vector) Scalar ===
func funcDayOfYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDayOfYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.YearDay())
}), nil
}
// === hour(v Vector) Scalar ===
func funcHour(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcHour(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Hour())
}), nil
}
// === minute(v Vector) Scalar ===
func funcMinute(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcMinute(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Minute())
}), nil
}
// === month(v Vector) Scalar ===
func funcMonth(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcMonth(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Month())
}), nil
}
// === year(v Vector) Scalar ===
func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcYear(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return dateWrapper(vals, enh, func(t time.Time) float64 {
return float64(t.Year())
}), nil
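The date functions above all go through dateWrapper, which converts each sample value into a time.Time and applies a small extractor such as func(t time.Time) float64. A standalone sketch under the assumption that values are Unix timestamps in seconds (simplified relative to the real helper), including the day-normalization trick that days_in_month relies on:

package main

import (
	"fmt"
	"time"
)

// dateWrapper converts a sample value, assumed here to be a Unix
// timestamp in seconds, to UTC time and applies the extractor.
func dateWrapper(v float64, f func(time.Time) float64) float64 {
	return f(time.Unix(int64(v), 0).UTC())
}

func main() {
	ts := 1738368000.0 // 2025-02-01T00:00:00Z
	fmt.Println(dateWrapper(ts, func(t time.Time) float64 { return float64(t.Month()) })) // 2

	// days_in_month relies on time.Date normalizing an out-of-range
	// day: day 32 rolls over into the next month, so 32 minus the
	// resulting day-of-month is the length of the month.
	fmt.Println(32 - time.Date(2025, time.February, 32, 0, 0, 0, 0, time.UTC).Day()) // 28
}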

View file

@ -173,7 +173,7 @@ func (h *histogramIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone
}
func (h *histogramIterator) Seek(t int64) chunkenc.ValueType { panic("not implemented") }
func (h *histogramIterator) Seek(_ int64) chunkenc.ValueType { panic("not implemented") }
func (h *histogramIterator) At() (int64, float64) { panic("not implemented") }
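The panic stubs above follow a common test pattern: a type satisfies a wide iterator interface but implements only the methods the test exercises, while the rest fail loudly if touched. A simplified standalone sketch (the interface is a stand-in for chunkenc.Iterator, not the real one):

package main

import "fmt"

// iterator is a cut-down stand-in for a chunk iterator interface.
type iterator interface {
	Next() bool
	Seek(t int64) bool
	At() (int64, float64)
}

// histogramOnly implements only Next; the unused paths panic so any
// accidental call is caught immediately in tests.
type histogramOnly struct{ n int }

func (h *histogramOnly) Next() bool           { h.n++; return h.n < 3 }
func (h *histogramOnly) Seek(_ int64) bool    { panic("not implemented") }
func (h *histogramOnly) At() (int64, float64) { panic("not implemented") }

func main() {
	var it iterator = &histogramOnly{}
	for it.Next() {
		fmt.Println("advanced")
	}
}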

View file

@ -83,7 +83,7 @@ loop:
func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
var nodeTimestamp *int64
var offset int64
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
switch n := node.(type) {
case *parser.VectorSelector:
if n.Timestamp != nil {
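parser.Inspect, used here and again below in buildDependencyMap, walks a parsed PromQL expression and invokes the visitor for every node; visitors that do not need the ancestor path take _ for it. A small usage sketch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

// Walk a query and print the metric name of every vector selector.
func main() {
	expr, err := parser.ParseExpr(`rate(http_requests_total[5m])`)
	if err != nil {
		panic(err)
	}
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if vs, ok := node.(*parser.VectorSelector); ok {
			fmt.Println(vs.Name) // http_requests_total
		}
		return nil
	})
}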

View file

@ -109,7 +109,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) {
NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2},
}
q := func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
q := func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
return []promql.Sample{{H: &h}}, nil
}
@ -678,7 +678,7 @@ func TestQueryForStateSeries(t *testing.T) {
tests := []testInput{
// Test for empty series.
{
selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
return storage.EmptySeriesSet()
},
expectedSeries: nil,
@ -686,7 +686,7 @@ func TestQueryForStateSeries(t *testing.T) {
},
// Test for error series.
{
selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
return storage.ErrSeriesSet(testError)
},
expectedSeries: nil,
@ -694,7 +694,7 @@ func TestQueryForStateSeries(t *testing.T) {
},
// Test for mock series.
{
selectMockFunction: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
selectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
return storage.TestSeriesSet(storage.MockSeries(
[]int64{1, 2, 3},
[]float64{1, 2, 3},
@ -989,7 +989,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
true, promslog.NewNopLogger(),
)
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx)
return nil, nil
}, nil, 0)

View file

@ -1117,7 +1117,7 @@ func buildDependencyMap(rules []Rule) dependencyMap {
break
}
parser.Inspect(rule.Query(), func(node parser.Node, path []parser.Node) error {
parser.Inspect(rule.Query(), func(node parser.Node, _ []parser.Node) error {
if n, ok := node.(*parser.VectorSelector); ok {
// Find the name matcher for the rule.
var nameMatcher *labels.Matcher

View file

@ -429,7 +429,7 @@ type Sender interface {
// SendAlerts implements the rules.NotifyFunc for a Notifier.
func SendAlerts(s Sender, externalURL string) NotifyFunc {
return func(ctx context.Context, expr string, alerts ...*Alert) {
return func(_ context.Context, expr string, alerts ...*Alert) {
var res []*notifier.Alert
for _, alert := range alerts {
@ -508,7 +508,7 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle
}
}
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
return c.sema.TryAcquire(1)
}
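Allow above gates concurrent rule evaluation with a weighted semaphore: TryAcquire claims a slot immediately or returns false, rather than blocking. A standalone sketch of that pattern with golang.org/x/sync/semaphore:

package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	// Budget of two concurrent evaluations.
	sema := semaphore.NewWeighted(2)
	for i := 0; i < 3; i++ {
		if sema.TryAcquire(1) {
			// A matching sema.Release(1) elsewhere returns the slot
			// once the evaluation finishes.
			fmt.Println("evaluation", i, "may run concurrently")
		} else {
			fmt.Println("evaluation", i, "falls back to sequential")
		}
	}
}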
@ -561,7 +561,7 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule)
return false
}
func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, _ *Group) []ConcurrentRules {
return nil
}

View file

@ -375,7 +375,7 @@ func TestForStateRestore(t *testing.T) {
Queryable: storage,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
NotifyFunc: func(_ context.Context, _ string, _ ...*Alert) {},
OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute,
}
@ -917,7 +917,7 @@ func TestNotify(t *testing.T) {
}
engine := promqltest.NewTestEngineWithOpts(t, engineOpts)
var lastNotified []*Alert
notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
notifyFunc := func(_ context.Context, _ string, alerts ...*Alert) {
lastNotified = alerts
}
opts := &ManagerOptions{
@ -1356,7 +1356,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
testValue = 3
}
skipEvalIterationFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) {
skipEvalIterationFunc := func(_ context.Context, _ *Group, _ time.Time) {
testValue = 4
}
@ -1395,7 +1395,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
Queryable: storage,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
NotifyFunc: func(_ context.Context, _ string, _ ...*Alert) {},
OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute,
}
@ -1528,7 +1528,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci
Context: context.Background(),
Logger: promslog.NewNopLogger(),
Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil },
})
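These tests stub the manager's QueryFunc with closures that ignore most of their parameters. A standalone sketch of the same shape, with a simplified stand-in for promql.Vector:

package main

import (
	"context"
	"fmt"
	"time"
)

// vector is a simplified stand-in for promql.Vector.
type vector []float64

// queryFunc mirrors the shape of the rules manager's QueryFunc.
type queryFunc func(ctx context.Context, q string, ts time.Time) (vector, error)

func main() {
	// A no-op stub: every query succeeds with no samples; all three
	// parameters are ignored, hence the blank identifiers.
	var q queryFunc = func(_ context.Context, _ string, _ time.Time) (vector, error) {
		return nil, nil
	}
	v, err := q(context.Background(), "up", time.Now())
	fmt.Println(v, err) // [] <nil>
}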
t.Run("load a mix of dependent and independent rules", func(t *testing.T) {
@ -2282,7 +2282,7 @@ func TestNewRuleGroupRestoration(t *testing.T) {
interval = 60 * time.Second
)
waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) {
waitForEvaluations := func(_ *testing.T, ch <-chan int32, targetCount int32) {
for {
select {
case cnt := <-ch:
@ -2300,11 +2300,11 @@ func TestNewRuleGroupRestoration(t *testing.T) {
option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency)
option.Queryable = store
option.Appendable = store
option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {}
option.NotifyFunc = func(_ context.Context, _ string, _ ...*Alert) {}
var evalCount atomic.Int32
ch := make(chan int32)
noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) {
noopEvalIterFunc := func(_ context.Context, _ *Group, _ time.Time) {
evalCount.Inc()
ch <- evalCount.Load()
}
@ -2345,7 +2345,7 @@ func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) {
interval = 60 * time.Second
)
waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) {
waitForEvaluations := func(_ *testing.T, ch <-chan int32, targetCount int32) {
for {
select {
case cnt := <-ch:
@ -2364,11 +2364,11 @@ func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) {
option.Queryable = store
option.Appendable = store
option.RestoreNewRuleGroups = true
option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {}
option.NotifyFunc = func(_ context.Context, _ string, _ ...*Alert) {}
var evalCount atomic.Int32
ch := make(chan int32)
noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) {
noopEvalIterFunc := func(_ context.Context, _ *Group, _ time.Time) {
evalCount.Inc()
ch <- evalCount.Load()
}
@ -2510,7 +2510,7 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I
ConcurrentEvalsEnabled: concurrent,
MaxConcurrentEvals: maxConcurrent,
Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) {
QueryFunc: func(_ context.Context, _ string, ts time.Time) (promql.Vector, error) {
inflightMu.Lock()
current := inflightQueries.Add(1)
@ -2659,7 +2659,7 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) {
Context: context.Background(),
Logger: promslog.NewNopLogger(),
Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil },
})
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, tc.ruleFile)
@ -2688,7 +2688,7 @@ func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) {
Context: context.Background(),
Logger: promslog.NewNopLogger(),
Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { return nil, nil },
})
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, "fixtures/rules_multiple.yaml")

View file

@ -246,7 +246,7 @@ func TestRecordingEvalWithOrigin(t *testing.T) {
require.NoError(t, err)
rule := NewRecordingRule(name, expr, lbs)
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, _ string, _ time.Time) (promql.Vector, error) {
detail = FromOriginContext(ctx)
return nil, nil
}, nil, 0)

View file

@ -43,7 +43,7 @@ func (a nopAppendable) Appender(_ context.Context) storage.Appender {
type nopAppender struct{}
func (a nopAppender) SetOptions(opts *storage.AppendOptions) {}
func (a nopAppender) SetOptions(_ *storage.AppendOptions) {}
func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) {
return 1, nil
@ -57,7 +57,7 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h
return 3, nil
}
func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (a nopAppender) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, nil
}
@ -137,7 +137,7 @@ type collectResultAppender struct {
pendingMetadata []metadataEntry
}
func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {}
func (a *collectResultAppender) SetOptions(_ *storage.AppendOptions) {}
func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
a.mtx.Lock()
@ -184,7 +184,7 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.
return a.next.AppendHistogram(ref, l, t, h, fh)
}
func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, _, ct int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil)
}
@ -205,7 +205,7 @@ func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.L
return a.next.UpdateMetadata(ref, l, m)
}
func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
func (a *collectResultAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, _, ct int64) (storage.SeriesRef, error) {
return a.Append(ref, l, ct, 0.0)
}
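The delegation above captures what a created-timestamp ("CT") zero sample is: an ordinary sample with value 0 written at the created timestamp ct, which is why the test appender simply forwards to Append. A simplified standalone sketch:

package main

import "fmt"

type sample struct {
	ts int64
	v  float64
}

type appender struct{ samples []sample }

func (a *appender) Append(ts int64, v float64) {
	a.samples = append(a.samples, sample{ts, v})
}

// The sample's own timestamp is not needed for the zero sample, hence
// the blank identifier; only the created timestamp ct is used.
func (a *appender) AppendCTZeroSample(_, ct int64) {
	a.Append(ct, 0.0)
}

func main() {
	var a appender
	a.AppendCTZeroSample(2000, 1500)
	fmt.Println(a.samples) // [{1500 0}]
}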

View file

@ -479,7 +479,7 @@ func loadConfiguration(t testing.TB, c string) *config.Config {
func noopLoop() loop {
return &testLoop{
startFunc: func(interval, timeout time.Duration, errc chan<- error) {},
startFunc: func(_, _ time.Duration, _ chan<- error) {},
stopFunc: func() {},
}
}
@ -730,7 +730,7 @@ func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server
once := sync.Once{}
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fail := true
once.Do(func() {
fail = false
@ -972,7 +972,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
once := sync.Once{}
// Start a fake HTTP target that allows only one scrape.
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fail := true
once.Do(func() {
fail = false

View file

@ -456,7 +456,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
for _, t := range targets {
// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
nonEmpty := false
t.LabelsRange(func(l labels.Label) { nonEmpty = true })
t.LabelsRange(func(_ labels.Label) { nonEmpty = true })
switch {
case nonEmpty:
all = append(all, t)
@ -820,7 +820,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
return s.client.Do(s.req.WithContext(ctx))
}
func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
func (s *targetScraper) readResponse(_ context.Context, resp *http.Response, w io.Writer) (string, error) {
defer func() {
io.Copy(io.Discard, resp.Body)
resp.Body.Close()

View file

@ -120,7 +120,7 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
// Create an appender for adding samples to the storage.
app := s.Appender(context.Background())
capp := &collectResultAppender{next: app}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0)
// Current time for generating timestamps.
now := time.Now()
@ -215,7 +215,7 @@ test_metric2{foo="bar"} 22
// Create an appender for adding samples to the storage.
capp := &collectResultAppender{next: nopAppender{}}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
@ -249,12 +249,12 @@ type nopScraper struct {
scraper
}
func (n nopScraper) Report(start time.Time, dur time.Duration, err error) {}
func (n nopScraper) Report(_ time.Time, _ time.Duration, _ error) {}
func TestScrapeReportMetadataUpdate(t *testing.T) {
// Create an appender for adding samples to the storage.
capp := &collectResultAppender{next: nopAppender{}}
sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(_ context.Context) storage.Appender { return capp }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
@ -503,7 +503,7 @@ func TestScrapePoolReload(t *testing.T) {
// equivalents have been stopped.
newLoop := func(opts scrapeLoopOptions) loop {
l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)}
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
l.startFunc = func(interval, timeout time.Duration, _ chan<- error) {
require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")
@ -593,7 +593,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
}
newLoop := func(opts scrapeLoopOptions) loop {
l := &testLoop{interval: opts.interval, timeout: opts.timeout}
l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
l.startFunc = func(interval, timeout time.Duration, _ chan<- error) {
require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
}
@ -651,10 +651,10 @@ func TestScrapePoolTargetLimit(t *testing.T) {
var wg sync.WaitGroup
// On starting to run, new loops created on reload check whether their preceding
// equivalents have been stopped.
newLoop := func(opts scrapeLoopOptions) loop {
newLoop := func(_ scrapeLoopOptions) loop {
wg.Add(1)
l := &testLoop{
startFunc: func(interval, timeout time.Duration, errc chan<- error) {
startFunc: func(_, _ time.Duration, _ chan<- error) {
wg.Done()
},
stopFunc: func() {},
@ -884,10 +884,10 @@ func TestScrapePoolRaces(t *testing.T) {
func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
var wg sync.WaitGroup
newLoop := func(opts scrapeLoopOptions) loop {
newLoop := func(_ scrapeLoopOptions) loop {
wg.Add(1)
l := &testLoop{
startFunc: func(interval, timeout time.Duration, errc chan<- error) {
startFunc: func(_, _ time.Duration, _ chan<- error) {
wg.Done()
},
stopFunc: func() {},
@ -1022,7 +1022,7 @@ func TestScrapeLoopStop(t *testing.T) {
signal = make(chan struct{}, 1)
appender = &collectResultAppender{}
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
// Since we're writing samples directly below we need to provide a protocol fallback.
@ -1078,7 +1078,7 @@ func TestScrapeLoopRun(t *testing.T) {
errc = make(chan error)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return &nopAppender{} }
app = func(_ context.Context) storage.Appender { return &nopAppender{} }
scrapeMetrics = newTestScrapeMetrics(t)
)
@ -1186,7 +1186,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
errc = make(chan error)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return &nopAppender{} }
app = func(_ context.Context) storage.Appender { return &nopAppender{} }
)
ctx, cancel := context.WithCancel(context.Background())
@ -1235,7 +1235,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
nil, nil,
nopMutator,
nopMutator,
func(ctx context.Context) storage.Appender { return nopAppender{} },
func(_ context.Context) storage.Appender { return nopAppender{} },
cache,
labels.NewSymbolTable(),
0,
@ -1540,7 +1540,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
)
sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes == cue {
action(sl)
@ -1608,7 +1608,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
var (
signal = make(chan struct{}, 1)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
ctx, cancel := context.WithCancel(context.Background())
@ -1617,7 +1617,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
// Succeed once, several failures, then stop.
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
@ -1654,7 +1654,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
var (
signal = make(chan struct{}, 1)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
numScrapes = 0
)
@ -1663,7 +1663,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
// Succeed once, several failures, then stop.
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
case 1:
@ -1716,7 +1716,7 @@ func TestScrapeLoopCache(t *testing.T) {
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
switch numScrapes {
case 1, 2:
_, ok := sl.cache.series["metric_a"]
@ -1770,7 +1770,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
var (
signal = make(chan struct{}, 1)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
ctx, cancel := context.WithCancel(context.Background())
@ -1778,7 +1778,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes < 5 {
s := ""
@ -1866,7 +1866,7 @@ func TestScrapeLoopAppend(t *testing.T) {
labels: labels.FromStrings(test.discoveryLabels...),
}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
}
@ -1954,7 +1954,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
}
@ -1978,7 +1978,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
// collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
fakeRef := storage.SeriesRef(1)
expValue := float64(1)
@ -2016,7 +2016,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
resApp := &collectResultAppender{}
app := &limitAppender{Appender: resApp, limit: 1}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
@ -2075,7 +2075,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
resApp := &collectResultAppender{}
app := &bucketLimitAppender{Appender: resApp, limit: 2}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = true
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
@ -2187,7 +2187,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
defer s.Close()
capp := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
@ -2219,7 +2219,7 @@ func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) {
app := &collectResultAppender{}
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "")
sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0, "")
now := time.Now()
slApp := sl.appender(context.Background())
@ -2233,7 +2233,7 @@ func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
app := &collectResultAppender{}
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0, "")
sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0, "")
now := time.Now()
slApp := sl.appender(context.Background())
@ -2245,7 +2245,7 @@ func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
func TestScrapeLoopAppendStaleness(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
@ -2275,7 +2275,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now)
@ -2299,7 +2299,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.trackTimestampsStaleness = true
now := time.Now()
@ -2654,7 +2654,7 @@ metric: <
labels: labels.FromStrings(test.discoveryLabels...),
}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
@ -2738,7 +2738,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@ -2775,13 +2775,13 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
var (
scraper = &testScraper{}
appender = &collectResultAppender{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
ctx, cancel := context.WithCancel(context.Background())
sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, _ io.Writer) error {
cancel()
return errors.New("scrape failed")
}
@ -2794,13 +2794,13 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
var (
scraper = &testScraper{}
appender = &collectResultAppender{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
ctx, cancel := context.WithCancel(context.Background())
sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
cancel()
w.Write([]byte("a{l=\"\xff\"} 1\n"))
return nil
@ -2829,7 +2829,7 @@ func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t in
func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {
app := &errorAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
now := time.Unix(1, 0)
slApp := sl.appender(context.Background())
@ -2853,7 +2853,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
app := &collectResultAppender{}
sl := newBasicScrapeLoop(t, context.Background(), nil,
func(ctx context.Context) storage.Appender {
func(_ context.Context) storage.Appender {
return &timeLimitAppender{
Appender: app,
maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
@ -2965,7 +2965,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
block := make(chan struct{})
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
<-block
}),
)
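The scrape tests lean on httptest throughout: a real in-process HTTP server whose handler ignores whichever parameters a given test does not need. A minimal self-contained example of the pattern:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// The handler never inspects the request, so *http.Request is _.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "metric_a 1\n")
	}))
	defer server.Close()

	resp, err := http.Get(server.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Print(string(body)) // metric_a 1
}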
@ -3021,7 +3021,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
func TestTargetScrapeScrapeNotFound(t *testing.T) {
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusNotFound)
}),
)
@ -3057,7 +3057,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
)
var gzipResponse bool
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
if gzipResponse {
w.Header().Set("Content-Encoding", "gzip")
@ -3147,11 +3147,11 @@ func (ts *testScraper) Report(start time.Time, duration time.Duration, err error
ts.lastError = err
}
func (ts *testScraper) scrape(ctx context.Context) (*http.Response, error) {
func (ts *testScraper) scrape(_ context.Context) (*http.Response, error) {
return nil, ts.scrapeErr
}
func (ts *testScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
func (ts *testScraper) readResponse(ctx context.Context, _ *http.Response, w io.Writer) (string, error) {
if ts.scrapeFunc != nil {
return "", ts.scrapeFunc(ctx, w)
}
@ -3164,7 +3164,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
app := s.Appender(context.Background())
capp := &collectResultAppender{next: app}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0)
now := time.Now()
slApp := sl.appender(context.Background())
@ -3190,7 +3190,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
capp := &collectResultAppender{next: app}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return capp }, 0)
sl.honorTimestamps = false
now := time.Now()
@ -3253,7 +3253,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
app := s.Appender(context.Background())
ctx, cancel := context.WithCancel(context.Background())
sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("drop") {
return labels.FromStrings("no", "name") // This label set will trigger an error.
@ -3582,7 +3582,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes%4 == 0 {
return errors.New("scrape failed")
@ -3795,7 +3795,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
labels: labels.FromStrings(test.discoveryLabels...),
}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender { return app }, 0)
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@ -4393,7 +4393,7 @@ metric: <
scrapeCount := 0
scraped := make(chan bool)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
if metricsText.contentType != "" {
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
for _, text := range metricsText.text {
@ -4493,7 +4493,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
var (
signal = make(chan struct{}, 1)
scraper = &testScraper{}
app = func(ctx context.Context) storage.Appender { return appender }
app = func(_ context.Context) storage.Appender { return appender }
)
ctx, cancel := context.WithCancel(context.Background())
@ -4503,7 +4503,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
// Succeed once, several failures, then stop.
numScrapes := 0
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
@ -4830,7 +4830,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
buffer := protoMarshalDelimited(t, histogramMetricFamily)
// Create a HTTP server to serve /metrics via ProtoBuf
metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
w.Write(buffer)
}))
@ -5073,7 +5073,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
var scrapes int
scrapedTwice = make(chan bool)
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
fmt.Fprint(w, scrapeText)
scrapes++
if scrapes == 2 {
@ -5085,7 +5085,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
// Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523.
func TestScrapePoolScrapeAfterReload(t *testing.T) {
h := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte{0x42, 0x42})
},
))

View file

@ -147,7 +147,7 @@ func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Targe
func TestNewHTTPBearerToken(t *testing.T) {
server := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(_ http.ResponseWriter, r *http.Request) {
expected := "Bearer 1234"
received := r.Header.Get("Authorization")
require.Equal(t, expected, received, "Authorization header was not set correctly.")
@ -168,7 +168,7 @@ func TestNewHTTPBearerToken(t *testing.T) {
func TestNewHTTPBearerTokenFile(t *testing.T) {
server := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(_ http.ResponseWriter, r *http.Request) {
expected := "Bearer 12345"
received := r.Header.Get("Authorization")
require.Equal(t, expected, received, "Authorization header was not set correctly.")
@ -189,7 +189,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
func TestNewHTTPBasicAuth(t *testing.T) {
server := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(_ http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
require.True(t, ok, "Basic authorization header was not set correctly.")
require.Equal(t, "user", username)
@ -214,7 +214,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
func TestNewHTTPCACert(t *testing.T) {
server := httptest.NewUnstartedServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
w.Write([]byte{})
},
@ -238,7 +238,7 @@ func TestNewHTTPCACert(t *testing.T) {
func TestNewHTTPClientCert(t *testing.T) {
server := httptest.NewUnstartedServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
w.Write([]byte{})
},
@ -267,7 +267,7 @@ func TestNewHTTPClientCert(t *testing.T) {
func TestNewHTTPWithServerName(t *testing.T) {
server := httptest.NewUnstartedServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
w.Write([]byte{})
},
@ -292,7 +292,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
func TestNewHTTPWithBadServerName(t *testing.T) {
server := httptest.NewUnstartedServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
w.Write([]byte{})
},

View file

@ -133,7 +133,7 @@ func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
}
// Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
func (q *mergeGenericQuerier) Select(ctx context.Context, _ bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
var limit int
if hints != nil {

View file

@ -959,7 +959,7 @@ func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints,
return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err}
}
func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (m *mockQuerier) LabelValues(_ context.Context, name string, _ *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
@ -1409,7 +1409,7 @@ func BenchmarkMergeLabelValuesWithLimit(b *testing.B) {
},
}
b.Run("benchmark", func(b *testing.B) {
b.Run("benchmark", func(_ *testing.B) {
ctx := context.Background()
hints := &LabelHints{
Limit: 1000,
@ -1696,7 +1696,7 @@ func (e errIterator) Next() chunkenc.ValueType {
return chunkenc.ValNone
}
func (e errIterator) Seek(t int64) chunkenc.ValueType {
func (e errIterator) Seek(_ int64) chunkenc.ValueType {
return chunkenc.ValNone
}

View file

@ -61,7 +61,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
for _, test := range tests {
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
http.Error(w, longErrMessage, test.code)
}),
)
@ -93,7 +93,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
func TestClientRetryAfter(t *testing.T) {
setupServer := func(statusCode int) *httptest.Server {
return httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Retry-After", "5")
http.Error(w, longErrMessage, statusCode)
}),
@ -180,7 +180,7 @@ func TestClientCustomHeaders(t *testing.T) {
var called bool
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
called = true
receivedHeaders := r.Header
for name, value := range headersToSend {
@ -271,7 +271,7 @@ func TestReadClient(t *testing.T) {
StartTimestampMs: 4000,
EndTimestampMs: 12000,
},
httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
flusher, ok := w.(http.Flusher)
@ -324,7 +324,7 @@ func TestReadClient(t *testing.T) {
},
{
name: "unsupported content type",
httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHandler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "foobar")
}),
expectedErrorContains: "unsupported content type",
@ -399,7 +399,7 @@ func TestReadClient(t *testing.T) {
}
func sampledResponseHTTPHandler(t *testing.T) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/x-protobuf")
resp := prompb.ReadResponse{

View file

@ -761,7 +761,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
metrics = newQueueManagerMetrics(nil, "", "")
client = &MockWriteClient{
StoreFunc: func(ctx context.Context, b []byte, i int) (WriteResponseStats, error) {
StoreFunc: func(_ context.Context, _ []byte, _ int) (WriteResponseStats, error) {
onStoreCalled()
return WriteResponseStats{}, RecoverableError{
@ -839,7 +839,7 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
return samples, series
}
func createProtoTimeseriesWithOld(numSamples, baseTs int64, extraLabels ...labels.Label) []prompb.TimeSeries {
func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) []prompb.TimeSeries {
samples := make([]prompb.TimeSeries, numSamples)
// Use a fixed rand source so tests are consistent.
r := rand.New(rand.NewSource(99))
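The fixed seed is what keeps these generated series reproducible: rand.New(rand.NewSource(99)) yields the same sequence on every run. A quick demonstration:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	a := rand.New(rand.NewSource(99))
	b := rand.New(rand.NewSource(99))
	// Two generators with the same seed produce identical sequences.
	fmt.Println(a.Int63() == b.Int63()) // true
}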

View file

@ -531,7 +531,7 @@ type OTLPOptions struct {
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
ex := &rwExporter{
writeHandler: &writeHandler{
logger: logger,

View file

@ -860,7 +860,7 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
return m
}
func (m *mockAppendable) SetOptions(opts *storage.AppendOptions) {
func (m *mockAppendable) SetOptions(_ *storage.AppendOptions) {
panic("unimplemented")
}
@ -956,7 +956,7 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
return 0, nil
}
func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
func (m *mockAppendable) AppendHistogramCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
if m.appendCTZeroSampleErr != nil {
return 0, m.appendCTZeroSampleErr
}
@ -1006,7 +1006,7 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp
return 0, nil
}
func (m *mockAppendable) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
if m.appendCTZeroSampleErr != nil {
return 0, m.appendCTZeroSampleErr
}

View file

@@ -65,7 +65,7 @@ func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sampl
if err != nil {
return &ChunkSeriesEntry{
Lset: lset,
ChunkIteratorFn: func(it chunks.Iterator) chunks.Iterator {
ChunkIteratorFn: func(_ chunks.Iterator) chunks.Iterator {
return errChunksIterator{err: err}
},
}

View file

@@ -656,7 +656,7 @@ Outer:
func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) {
numStones := 0
if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error {
if err := pb.tombstones.Iter(func(_ storage.SeriesRef, ivs tombstones.Intervals) error {
numStones += len(ivs)
return nil
}); err != nil {

View file

@@ -602,7 +602,7 @@ func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp
{
name: "missing label",
labelName: "missing",
match: func(val string) bool {
match: func(_ string) bool {
return true
},
exp: nil,

View file

@@ -63,7 +63,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
gotCutFile bool
)
blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, _, cutFile bool) error {
gotSeriesRef = seriesRef
gotMint = mint
gotMaxt = maxt
@@ -82,7 +82,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
ref := newChunkDiskMapperRef(321, 123)
cutFile := true
awaitCb := make(chan struct{})
require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(err error) {
require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(_ error) {
close(awaitCb)
}}))
<-awaitCb
@@ -101,7 +101,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
unblockChunkWriterCh := make(chan struct{}, sizeLimit)
// blockingChunkWriter blocks until the unblockChunkWriterCh channel returns a value.
blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
<-unblockChunkWriterCh
return nil
}
@@ -117,7 +117,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
callbackWg.Add(1)
require.NoError(t, q.addJob(chunkWriteJob{
ref: chunkRef,
callback: func(err error) {
callback: func(_ error) {
callbackWg.Done()
},
}))
@@ -212,7 +212,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
for _, concurrentWrites := range []int{1, 10, 100, 1000} {
b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) {
issueReadSignal := make(chan struct{})
q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, ooo, b bool) error {
q := newChunkWriteQueue(nil, 1000, func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
if withReads {
select {
case issueReadSignal <- struct{}{}:

View file

@@ -155,7 +155,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
hrw = createChunkDiskMapper(t, dir)
idx := 0
require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, _, maxt int64, numSamples uint16, _ chunkenc.Encoding, isOOO bool) error {
t.Helper()
expData := expectedData[idx]
@@ -574,7 +574,7 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSer
if rand.Intn(2) == 0 {
isOOO = true
}
chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(cbErr error) {
chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(_ error) {
require.NoError(t, err)
close(awaitCb)
})

View file

@@ -1012,7 +1012,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
func TestWALSegmentSizeOptions(t *testing.T) {
tests := map[int]func(dbdir string, segmentSize int){
// Default Wal Size.
0: func(dbDir string, segmentSize int) {
0: func(dbDir string, _ int) {
filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err)
files := []os.FileInfo{}
@@ -1051,7 +1051,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
// Wal disabled.
-1: func(dbDir string, segmentSize int) {
-1: func(dbDir string, _ int) {
// Check that WAL dir is not there.
_, err := os.Stat(filepath.Join(dbDir, "wal"))
require.Error(t, err)
@@ -1553,7 +1553,7 @@ func TestSizeRetention(t *testing.T) {
// Create a WAL checkpoint, and compare sizes.
first, last, err := wlog.Segments(db.Head().wal.Dir())
require.NoError(t, err)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(_ chunks.HeadSeriesRef) bool { return false }, 0)
require.NoError(t, err)
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err = db.Head().wal.Size()
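The Checkpoint call sites in these tests pass a keep-series predicate that never reads its argument (keep everything, or keep nothing), which is exactly the shape the lint rule flags. In isolation the pattern looks like this (hypothetical helper, not the wlog API):

package main

import "fmt"

// filterIDs keeps the ids for which keep returns true.
func filterIDs(ids []uint64, keep func(id uint64) bool) []uint64 {
	var out []uint64
	for _, id := range ids {
		if keep(id) {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	ids := []uint64{1, 2, 3}
	// Constant predicates never read their argument, so it is blanked.
	all := filterIDs(ids, func(_ uint64) bool { return true })
	none := filterIDs(ids, func(_ uint64) bool { return false })
	fmt.Println(len(all), len(none)) // 3 0
}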
@@ -5549,7 +5549,7 @@ func TestQuerierOOOQuery(t *testing.T) {
sampleFunc func(ts int64) chunks.Sample
}{
"float": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) {
return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts))
},
sampleFunc: func(ts int64) chunks.Sample {
@@ -5582,7 +5582,7 @@
},
"integer histogram counter resets": {
// Adding counter reset to all histograms means each histogram will have its own chunk.
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(ts)
h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
@@ -5610,7 +5610,7 @@ func testQuerierOOOQuery(t *testing.T,
series1 := labels.FromStrings("foo", "bar1")
type filterFunc func(t int64) bool
defaultFilterFunc := func(t int64) bool { return true }
defaultFilterFunc := func(_ int64) bool { return true }
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
@@ -5865,7 +5865,7 @@ func TestChunkQuerierOOOQuery(t *testing.T) {
checkInUseBucket bool
}{
"float": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) {
return app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts))
},
sampleFunc: func(ts int64) chunks.Sample {
@@ -5898,7 +5898,7 @@
},
"integer histogram counter resets": {
// Adding counter reset to all histograms means each histogram will have its own chunk.
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(ts)
h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
@@ -5909,7 +5909,7 @@ func TestChunkQuerierOOOQuery(t *testing.T) {
},
"integer histogram with recode": {
// Histograms have increasing number of buckets so their chunks are recoded.
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
appendFunc: func(app storage.Appender, ts int64, _ bool) (storage.SeriesRef, error) {
n := ts / time.Minute.Milliseconds()
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nBucketHistogram(n), nil)
},
@@ -5941,7 +5941,7 @@ func testChunkQuerierOOOQuery(t *testing.T,
series1 := labels.FromStrings("foo", "bar1")
type filterFunc func(t int64) bool
defaultFilterFunc := func(t int64) bool { return true }
defaultFilterFunc := func(_ int64) bool { return true }
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
@@ -6221,7 +6221,7 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS
opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
type resetFunc func(v int64) bool
defaultResetFunc := func(v int64) bool { return false }
defaultResetFunc := func(_ int64) bool { return false }
lbls := labels.FromStrings("foo", "bar1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
@@ -9293,7 +9293,7 @@ func TestNewCompactorFunc(t *testing.T) {
opts := DefaultOptions()
block1 := ulid.MustNew(1, nil)
block2 := ulid.MustNew(2, nil)
opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) {
opts.NewCompactorFunc = func(_ context.Context, _ prometheus.Registerer, _ *slog.Logger, _ []int64, _ chunkenc.Pool, _ *Options) (Compactor, error) {
return &mockCompactorFn{
planFn: func() ([]string, error) {
return []string{block1.String(), block2.String()}, nil

View file

@@ -20,7 +20,7 @@ import (
func DirSize(dir string) (int64, error) {
var size int64
err := filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error {
err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}

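DirSize only needs each entry's FileInfo, so the path argument of the walk callback is blanked. A runnable equivalent over a temporary directory:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// dirSize sums regular-file sizes under dir; like the change above,
// the walk callback ignores the path it is given.
func dirSize(dir string) (int64, error) {
	var size int64
	err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			size += info.Size()
		}
		return nil
	})
	return size, err
}

func main() {
	dir, err := os.MkdirTemp("", "walk-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "a.txt"), []byte("hello"), 0o644); err != nil {
		panic(err)
	}
	fmt.Println(dirSize(dir)) // 5 <nil>
}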
View file

@@ -76,7 +76,7 @@ func copyFile(src, dest string) error {
func readDirs(src string) ([]string, error) {
var files []string
err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error {
err := filepath.Walk(src, func(path string, _ os.FileInfo, _ error) error {
relativePath := strings.TrimPrefix(path, src)
if len(relativePath) > 0 {
files = append(files, relativePath)

View file

@@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels {
}
// RebuildSymbolTable is a no-op when not using dedupelabels.
func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable {
func (h *Head) RebuildSymbolTable(_ *slog.Logger) *labels.SymbolTable {
return nil
}

View file

@@ -489,7 +489,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
// oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk.
// It never returns the head OOO chunk.
func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) {
func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, _ *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) {
// ix represents the index of chunk in the s.ooo.oooMmappedChunks slice. The chunk id's are
// incremented by 1 when new chunk is created, hence (id - firstOOOChunkID) gives the slice index.
ix := int(id) - int(s.ooo.firstOOOChunkID)

View file

@@ -2500,7 +2500,7 @@ func TestMemSeriesIsolation(t *testing.T) {
return i
}
testIsolation := func(h *Head, i int) {
testIsolation := func(_ *Head, _ int) {
}
// Test isolation without restart of Head.
@@ -5133,7 +5133,7 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
var expOOOSamples []chunks.Sample
l := labels.FromStrings("foo", "bar")
appendSample := func(mins int64, val float64, isOOO bool) {
appendSample := func(mins int64, _ float64, isOOO bool) {
app := h.Appender(context.Background())
_, s, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins)
require.NoError(t, err)
@@ -5896,7 +5896,7 @@ func TestCuttingNewHeadChunks(t *testing.T) {
}{
"float samples": {
numTotalSamples: 180,
floatValFunc: func(i int) float64 {
floatValFunc: func(_ int) float64 {
return 1.
},
expectedChks: []struct {

View file

@@ -1473,7 +1473,7 @@ func (r *Reader) Close() error {
return r.c.Close()
}
func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) {
func (r *Reader) lookupSymbol(_ context.Context, o uint32) (string, error) {
if s, ok := r.nameSymbols[o]; ok {
return s, nil
}

View file

@@ -72,7 +72,7 @@ func TestIsolation(t *testing.T) {
func countOpenReads(iso *isolation) int {
count := 0
iso.TraverseOpenReads(func(s *isolationState) bool {
iso.TraverseOpenReads(func(_ *isolationState) bool {
count++
return true
})

View file

@@ -484,15 +484,15 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l
return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks)
}
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented")
}
func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, _ bool, _ ...*labels.Matcher) (index.Postings, error) {
return nil, errors.New("not implemented")
}
@@ -504,7 +504,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S
return "", errors.New("not implemented")
}
func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) {
return nil, errors.New("not implemented")
}

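OOOCompactionHeadIndexReader stubs out index methods it does not support, and once a "not implemented" method reads none of its parameters, revive wants them all blanked. The idiom in miniature (interface and types invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// Index is a slice of the kind of interface such readers implement.
type Index interface {
	LabelValues(name string, limit int) ([]string, error)
}

// stubIndex implements Index but supports nothing; every parameter
// is intentionally ignored, hence the blank identifiers.
type stubIndex struct{}

func (stubIndex) LabelValues(_ string, _ int) ([]string, error) {
	return nil, errors.New("not implemented")
}

func main() {
	var ix Index = stubIndex{}
	_, err := ix.LabelValues("job", 10)
	fmt.Println(err) // not implemented
}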
View file

@@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
}, nil
}
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.SortedLabelValues(ctx, name, matchers...)
return res, nil, err
}
func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *blockBaseQuerier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.LabelNames(ctx, matchers...)
return res, nil, err
}

View file

@@ -104,7 +104,7 @@ type seriesSamples struct {
// Index: labels -> postings -> chunkMetas -> chunkRef.
// ChunkReader: ref -> vals.
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
sort.Slice(tc, func(i, j int) bool {
sort.Slice(tc, func(i, _ int) bool {
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0
})
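This hunk also shows why the rule earns its keep beyond style: j goes unused because both sides of the comparison index with i, so the less function compares tc[i] against itself and can never establish an order; the unused parameter is the visible symptom. A standalone contrast, assuming the intent was an ordinary sort:

package main

import (
	"fmt"
	"sort"
)

func main() {
	xs := []string{"b", "c", "a"}

	// Suspicious: with both sides indexed by i the comparator is
	// constant, so the slice ends up in no particular order.
	sort.Slice(xs, func(i, _ int) bool { return xs[i] < xs[i] })

	// Intended: compare element i against element j.
	sort.Slice(xs, func(i, j int) bool { return xs[i] < xs[j] })
	fmt.Println(xs) // [a b c]
}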
@@ -3318,7 +3318,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri
return "", errors.New("label value for called")
}
func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
func (m mockMatcherIndex) LabelNamesFor(_ context.Context, _ index.Postings) ([]string, error) {
return nil, errors.New("label names for called")
}
@@ -3326,15 +3326,15 @@ func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Po
return index.EmptyPostings(), nil
}
func (m mockMatcherIndex) SortedPostings(p index.Postings) index.Postings {
func (m mockMatcherIndex) SortedPostings(_ index.Postings) index.Postings {
return index.EmptyPostings()
}
func (m mockMatcherIndex) ShardedPostings(ps index.Postings, shardIndex, shardCount uint64) index.Postings {
func (m mockMatcherIndex) ShardedPostings(ps index.Postings, _, _ uint64) index.Postings {
return ps
}
func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
func (m mockMatcherIndex) Series(_ storage.SeriesRef, _ *labels.ScratchBuilder, _ *[]chunks.Meta) error {
return nil
}
@@ -3590,13 +3590,13 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
"intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestHistogram(int64(i)), nil
},
"intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
"intgauge": func(_ int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestGaugeHistogram(rand.Int63() % 1000), nil
},
"floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestFloatHistogram(int64(i))
},
"floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
"floatGauge": func(_ int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int63() % 1000)
},
}

View file

@@ -204,7 +204,7 @@ type Decoder struct {
builder labels.ScratchBuilder
}
func NewDecoder(t *labels.SymbolTable) Decoder { // FIXME remove t
func NewDecoder(_ *labels.SymbolTable) Decoder { // FIXME remove t
return Decoder{builder: labels.NewScratchBuilder(0)}
}

View file

@@ -399,7 +399,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
}
}
Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(x chunks.HeadSeriesRef) bool { return true }, 0)
Checkpoint(promslog.NewNopLogger(), w, 0, 1, func(_ chunks.HeadSeriesRef) bool { return true }, 0)
w.Truncate(1)
// Write more records after checkpointing.
@@ -490,7 +490,7 @@ func TestReadCheckpoint(t *testing.T) {
}
_, err = w.NextSegmentSync()
require.NoError(t, err)
_, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(x chunks.HeadSeriesRef) bool { return true }, 0)
_, err = Checkpoint(promslog.NewNopLogger(), w, 30, 31, func(_ chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
require.NoError(t, w.Truncate(32))
@@ -653,7 +653,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
return wt.checkNumSeries() == seriesCount
}, 10*time.Second, 1*time.Second)
_, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(x chunks.HeadSeriesRef) bool { return true }, 0)
_, err = Checkpoint(promslog.NewNopLogger(), w, 2, 4, func(_ chunks.HeadSeriesRef) bool { return true }, 0)
require.NoError(t, err)
err = w.Truncate(5)

View file

@@ -40,7 +40,7 @@ func setup() func() {
}
func getCompressionHandlerFunc() CompressionHandler {
hf := func(w http.ResponseWriter, r *http.Request) {
hf := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("Hello World!"))
}

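Handlers that write a fixed response never touch the *http.Request, which is why so many of the test handlers in this commit blank it. A runnable sketch:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// The handler ignores the request, so the second parameter is blanked.
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		io.WriteString(w, "Hello World!")
	})
	srv := httptest.NewServer(h)
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.StatusCode, string(body)) // 200 Hello World!
}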
View file

@@ -314,7 +314,7 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
Appendable: storage,
Context: context.Background(),
Logger: promslog.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
NotifyFunc: func(_ context.Context, _ string, _ ...*rules.Alert) {},
}
var r []rules.Rule
@@ -951,7 +951,7 @@ func TestStats(t *testing.T) {
},
{
name: "custom handler with known value",
renderer: func(ctx context.Context, s *stats.Statistics, p string) stats.QueryStats {
renderer: func(_ context.Context, _ *stats.Statistics, p string) stats.QueryStats {
if p == "known" {
return testStats{"Custom Value"}
}
@@ -4127,7 +4127,7 @@ func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) {
}
func TestRespondError(t *testing.T) {
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
api := API{}
api.respondError(w, &apiError{errorTimeout, errors.New("message")}, "test")
}))
@@ -4723,11 +4723,11 @@ type fakeEngine struct {
query fakeQuery
}
func (e *fakeEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) {
func (e *fakeEngine) NewInstantQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _ time.Time) (promql.Query, error) {
return &e.query, nil
}
func (e *fakeEngine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error) {
func (e *fakeEngine) NewRangeQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _, _ time.Time, _ time.Duration) (promql.Query, error) {
return &e.query, nil
}

View file

@@ -158,15 +158,15 @@ type errorTestQueryable struct {
err error
}
func (t errorTestQueryable) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
func (t errorTestQueryable) ExemplarQuerier(_ context.Context) (storage.ExemplarQuerier, error) {
return nil, t.err
}
func (t errorTestQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
func (t errorTestQueryable) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
return nil, t.err
}
func (t errorTestQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
func (t errorTestQueryable) Querier(_, _ int64) (storage.Querier, error) {
if t.q != nil {
return t.q, nil
}
@@ -190,7 +190,7 @@ func (t errorTestQuerier) Close() error {
return nil
}
func (t errorTestQuerier) Select(_ context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
func (t errorTestQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
if t.s != nil {
return t.s
}

View file

@@ -441,7 +441,7 @@ func New(logger *slog.Logger, o *Options) *Handler {
router.Get("/consoles/*filepath", readyf(h.consoles))
serveReactApp := func(w http.ResponseWriter, r *http.Request) {
serveReactApp := func(w http.ResponseWriter, _ *http.Request) {
indexPath := reactAssetsRoot + "/index.html"
f, err := ui.Assets.Open(indexPath)
if err != nil {
@@ -539,18 +539,18 @@ func New(logger *slog.Logger, o *Options) *Handler {
router.Get("/debug/*subpath", serveDebug)
router.Post("/debug/*subpath", serveDebug)
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
router.Get("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "%s is Healthy.\n", o.AppName)
})
router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
router.Get("/-/ready", readyf(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "%s is Ready.\n", o.AppName)
}))
router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
router.Head("/-/ready", readyf(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}))
@@ -896,7 +896,7 @@ func (h *Handler) consolesPath() string {
}
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(_ string, handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)))
}
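setPathWithPrefix returns a decorator, and after this change the decorator ignores the handler name it is handed, wrapping only the handler itself. A minimal decorator of the same shape (the context key and helper below are invented for the example; the real code uses httputil.ContextWithPath):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type pathKey struct{}

// withPrefix builds a decorator whose first argument (a handler name)
// is accepted for signature compatibility but unused, hence blanked.
func withPrefix(prefix string) func(string, http.HandlerFunc) http.HandlerFunc {
	return func(_ string, handler http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			ctx := context.WithValue(r.Context(), pathKey{}, prefix+r.URL.Path)
			handler(w, r.WithContext(ctx))
		}
	}
}

func main() {
	h := withPrefix("/prometheus")("ignored-name", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.Context().Value(pathKey{}))
	})
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/graph", nil))
	fmt.Println(rec.Body.String()) // /prometheus/graph
}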