Format Go source files using 'gofumpt -w -s -extra'

Part of #9557

Signed-off-by: Mateusz Gozdek <mgozdekof@gmail.com>
Mateusz Gozdek 2021-10-22 10:06:44 +02:00 committed by Julien Pivotto
parent b1e8e8a0ca
commit 1a6c2283a3
130 changed files with 1697 additions and 1350 deletions
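For readers skimming the hunks below: the diff is purely mechanical formatting. The following is a minimal sketch of the main rewrites that 'gofumpt -w -s -extra' produces in this commit, shown on a hypothetical file (the identifiers and the writeNote helper are illustrative, not taken from the commit).

package example

import (
	"fmt"
	"os"
)

// Adjacent single-line `var x = ...` declarations are grouped into one var
// block, as in the promPath/promConfig hunk below.
var (
	examplePath = os.Args[0]
	exampleData = os.TempDir()
)

// Octal literals gain the 0o prefix, e.g. 0666 -> 0o666.
const filePerm = 0o666

// Ordinary comments gain a space after the slashes (`//TODO` -> `// TODO`).

// writeNote demonstrates the -extra rule that collapses repeated parameter
// types (`name string, data string` -> `name, data string`) and the rewrite
// of `var x = y` to `x := y` inside function bodies.
func writeNote(name, data string, quiet bool) error {
	msg := fmt.Sprintf("writing %s", name) // was: var msg = fmt.Sprintf(...)
	if !quiet {
		fmt.Println(msg)
	}
	return os.WriteFile(name, []byte(data), filePerm)
}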

View file

@ -664,7 +664,7 @@ func main() {
)
// This is passed to ruleManager.Update().
var externalURL = cfg.web.ExternalURL.String()
externalURL := cfg.web.ExternalURL.String()
reloaders := []reloader{
{
@ -896,7 +896,6 @@ func main() {
return nil
}
}
},
func(err error) {
// Wait for any in-progress reloads to complete to avoid
@ -1146,6 +1145,7 @@ type safePromQLNoStepSubqueryInterval struct {
func durationToInt64Millis(d time.Duration) int64 {
return int64(d / time.Millisecond)
}
func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
i.value.Store(durationToInt64Millis(time.Duration(ev)))
}
@ -1159,7 +1159,7 @@ type reloader struct {
reloader func(*config.Config) error
}
func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
start := time.Now()
timings := []interface{}{}
level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)

View file

@ -35,10 +35,12 @@ import (
"github.com/prometheus/prometheus/rules"
)
var promPath = os.Args[0]
var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
var agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
var promData = filepath.Join(os.TempDir(), "data")
var (
promPath = os.Args[0]
promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
promData = filepath.Join(os.TempDir(), "data")
)
func TestMain(m *testing.M) {
for i, arg := range os.Args {

View file

@ -21,7 +21,7 @@ import (
"github.com/pkg/errors"
)
const filePerm = 0666
const filePerm = 0o666
type tarGzFileWriter struct {
tarWriter *tar.Writer

View file

@ -105,7 +105,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
// The next sample is not in this timerange, we can avoid parsing
// the file for this timerange.
continue
}
nextSampleTs = math.MaxInt64
@ -207,13 +206,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
return nil
}()
if err != nil {
return errors.Wrap(err, "process blocks")
}
}
return nil
}
func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {

View file

@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error {
return errors.Wrap(err, "error writing into the archive")
}
}
}
if err := archiver.close(); err != nil {

View file

@ -257,7 +257,7 @@ func main() {
case tsdbDumpCmd.FullCommand():
os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
//TODO(aSquare14): Work on adding support for custom block size.
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
@ -560,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
var rules compareRuleTypes
for _, group := range groups {
for _, rule := range group.Rules {
rules = append(rules, compareRuleType{
metric: ruleMetric(rule),
@ -774,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
}
// QueryLabels queries for label values against a Prometheus server.
func QueryLabels(url *url.URL, name string, start, end string, p printer) int {
func QueryLabels(url *url.URL, name, start, end string, p printer) int {
if url.Scheme == "" {
url.Scheme = "http"
}
@ -952,11 +951,13 @@ type promqlPrinter struct{}
func (p *promqlPrinter) printValue(v model.Value) {
fmt.Println(v)
}
func (p *promqlPrinter) printSeries(val []model.LabelSet) {
for _, v := range val {
fmt.Println(v)
}
}
func (p *promqlPrinter) printLabelValues(val model.LabelValues) {
for _, v := range val {
fmt.Println(v)
@ -969,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) {
//nolint:errcheck
json.NewEncoder(os.Stdout).Encode(v)
}
func (j *jsonPrinter) printSeries(v []model.LabelSet) {
//nolint:errcheck
json.NewEncoder(os.Stdout).Encode(v)
}
func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
//nolint:errcheck
json.NewEncoder(os.Stdout).Encode(v)
@ -980,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
// importRules backfills recording rules from the files provided. The output are blocks of data
// at the outputDir location.
func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error {
func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
ctx := context.Background()
var stime, etime time.Time
var err error

View file

@ -54,7 +54,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
twentyFourHourDuration, _ = time.ParseDuration("24h")
)
var testCases = []struct {
testCases := []struct {
name string
runcount int
maxBlockDuration time.Duration
@ -192,7 +192,7 @@ func createSingleRuleTestFiles(path string) error {
labels:
testlabel11: testlabelvalue11
`
return ioutil.WriteFile(path, []byte(recordingRules), 0777)
return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
}
func createMultiRuleTestFiles(path string) error {
@ -212,7 +212,7 @@ func createMultiRuleTestFiles(path string) error {
labels:
testlabel11: testlabelvalue13
`
return ioutil.WriteFile(path, []byte(recordingRules), 0777)
return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
}
// TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
@ -244,7 +244,7 @@ func TestBackfillLabels(t *testing.T) {
labels:
name1: value-from-rule
`
require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777))
require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777))
errs := ruleImporter.loadGroups(ctx, []string{path})
for _, err := range errs {
require.NoError(t, err)

View file

@ -46,21 +46,24 @@ func TestSDCheckResult(t *testing.T) {
}
expectedSDCheckResult := []sdCheckResult{
sdCheckResult{
{
DiscoveredLabels: labels.Labels{
labels.Label{Name: "__address__", Value: "localhost:8080"},
labels.Label{Name: "__scrape_interval__", Value: "0s"},
labels.Label{Name: "__scrape_timeout__", Value: "0s"},
labels.Label{Name: "foo", Value: "bar"}},
labels.Label{Name: "foo", Value: "bar"},
},
Labels: labels.Labels{
labels.Label{Name: "__address__", Value: "localhost:8080"},
labels.Label{Name: "__scrape_interval__", Value: "0s"},
labels.Label{Name: "__scrape_timeout__", Value: "0s"},
labels.Label{Name: "foo", Value: "bar"},
labels.Label{Name: "instance", Value: "localhost:8080"},
labels.Label{Name: "newfoo", Value: "bar"}},
labels.Label{Name: "newfoo", Value: "bar"},
},
Error: nil,
}}
},
}
require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
}

View file

@ -17,7 +17,6 @@ import (
"bufio"
"context"
"fmt"
"github.com/prometheus/prometheus/tsdb/index"
"io"
"io/ioutil"
"math"
@ -32,6 +31,8 @@ import (
"text/tabwriter"
"time"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/pkg/errors"
@ -78,7 +79,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
if err := os.RemoveAll(b.outPath); err != nil {
return err
}
if err := os.MkdirAll(b.outPath, 0777); err != nil {
if err := os.MkdirAll(b.outPath, 0o777); err != nil {
return err
}
@ -589,7 +590,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
histogram := make([]int, nBuckets)
totalChunks := 0
for postingsr.Next() {
var lbsl = labels.Labels{}
lbsl := labels.Labels{}
var chks []chunks.Meta
if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
return err
@ -671,14 +672,14 @@ func checkErr(err error) int {
return 0
}
func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
inputFile, err := fileutil.OpenMmapFile(path)
if err != nil {
return checkErr(err)
}
defer inputFile.Close()
if err := os.MkdirAll(outputDir, 0777); err != nil {
if err := os.MkdirAll(outputDir, 0o777); err != nil {
return checkErr(errors.Wrap(err, "create output dir"))
}

View file

@ -387,7 +387,6 @@ Outer:
// seriesLoadingString returns the input series in PromQL notation.
func (tg *testGroup) seriesLoadingString() string {
result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
for _, is := range tg.InputSeries {
result += fmt.Sprintf(" %v %v\n", is.Series, is.Values)

View file

@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
}
// LoadFile parses the given YAML file into a Config.
func LoadFile(filename string, agentMode bool, expandExternalLabels bool, logger log.Logger) (*Config, error) {
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err

View file

@ -784,17 +784,19 @@ var expectedConf = &Config{
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
Role: "instance",
Region: "RegionOne",
Port: 80,
Availability: "public",
RefreshInterval: model.Duration(60 * time.Second),
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
}},
ServiceDiscoveryConfigs: discovery.Configs{
&openstack.SDConfig{
Role: "instance",
Region: "RegionOne",
Port: 80,
Availability: "public",
RefreshInterval: model.Duration(60 * time.Second),
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
{
@ -808,22 +810,23 @@ var expectedConf = &Config{
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
URL: "https://puppetserver/",
Query: "resources { type = \"Package\" and title = \"httpd\" }",
IncludeParameters: true,
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
ServiceDiscoveryConfigs: discovery.Configs{
&puppetdb.SDConfig{
URL: "https://puppetserver/",
Query: "resources { type = \"Package\" and title = \"httpd\" }",
IncludeParameters: true,
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.HTTPClientConfig{
FollowRedirects: true,
TLSConfig: config.TLSConfig{
CAFile: "testdata/valid_ca_file",
CertFile: "testdata/valid_cert_file",
KeyFile: "testdata/valid_key_file",
},
},
},
},
},
},
{
JobName: "hetzner",
@ -1086,170 +1089,224 @@ var expectedErrors = []struct {
{
filename: "jobname.bad.yml",
errMsg: `job_name is empty`,
}, {
},
{
filename: "jobname_dup.bad.yml",
errMsg: `found multiple scrape configs with job name "prometheus"`,
}, {
},
{
filename: "scrape_interval.bad.yml",
errMsg: `scrape timeout greater than scrape interval`,
}, {
},
{
filename: "labelname.bad.yml",
errMsg: `"not$allowed" is not a valid label name`,
}, {
},
{
filename: "labelname2.bad.yml",
errMsg: `"not:allowed" is not a valid label name`,
}, {
},
{
filename: "labelvalue.bad.yml",
errMsg: `"\xff" is not a valid label value`,
}, {
},
{
filename: "regex.bad.yml",
errMsg: "error parsing regexp",
}, {
},
{
filename: "modulus_missing.bad.yml",
errMsg: "relabel configuration for hashmod requires non-zero modulus",
}, {
},
{
filename: "labelkeep.bad.yml",
errMsg: "labelkeep action requires only 'regex', and no other fields",
}, {
},
{
filename: "labelkeep2.bad.yml",
errMsg: "labelkeep action requires only 'regex', and no other fields",
}, {
},
{
filename: "labelkeep3.bad.yml",
errMsg: "labelkeep action requires only 'regex', and no other fields",
}, {
},
{
filename: "labelkeep4.bad.yml",
errMsg: "labelkeep action requires only 'regex', and no other fields",
}, {
},
{
filename: "labelkeep5.bad.yml",
errMsg: "labelkeep action requires only 'regex', and no other fields",
}, {
},
{
filename: "labeldrop.bad.yml",
errMsg: "labeldrop action requires only 'regex', and no other fields",
}, {
},
{
filename: "labeldrop2.bad.yml",
errMsg: "labeldrop action requires only 'regex', and no other fields",
}, {
},
{
filename: "labeldrop3.bad.yml",
errMsg: "labeldrop action requires only 'regex', and no other fields",
}, {
},
{
filename: "labeldrop4.bad.yml",
errMsg: "labeldrop action requires only 'regex', and no other fields",
}, {
},
{
filename: "labeldrop5.bad.yml",
errMsg: "labeldrop action requires only 'regex', and no other fields",
}, {
},
{
filename: "labelmap.bad.yml",
errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action",
}, {
},
{
filename: "rules.bad.yml",
errMsg: "invalid rule file path",
}, {
},
{
filename: "unknown_attr.bad.yml",
errMsg: "field consult_sd_configs not found in type",
}, {
},
{
filename: "bearertoken.bad.yml",
errMsg: "at most one of bearer_token & bearer_token_file must be configured",
}, {
},
{
filename: "bearertoken_basicauth.bad.yml",
errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
}, {
},
{
filename: "kubernetes_http_config_without_api_server.bad.yml",
errMsg: "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
}, {
},
{
filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
errMsg: "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
}, {
},
{
filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
errMsg: "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
},
{
filename: "kubernetes_bearertoken.bad.yml",
errMsg: "at most one of bearer_token & bearer_token_file must be configured",
}, {
},
{
filename: "kubernetes_role.bad.yml",
errMsg: "role",
}, {
},
{
filename: "kubernetes_selectors_endpoints.bad.yml",
errMsg: "endpoints role supports only pod, service, endpoints selectors",
}, {
},
{
filename: "kubernetes_selectors_ingress.bad.yml",
errMsg: "ingress role supports only ingress selectors",
}, {
},
{
filename: "kubernetes_selectors_node.bad.yml",
errMsg: "node role supports only node selectors",
}, {
},
{
filename: "kubernetes_selectors_pod.bad.yml",
errMsg: "pod role supports only pod selectors",
}, {
},
{
filename: "kubernetes_selectors_service.bad.yml",
errMsg: "service role supports only service selectors",
}, {
},
{
filename: "kubernetes_namespace_discovery.bad.yml",
errMsg: "field foo not found in type kubernetes.plain",
}, {
},
{
filename: "kubernetes_selectors_duplicated_role.bad.yml",
errMsg: "duplicated selector role: pod",
}, {
},
{
filename: "kubernetes_selectors_incorrect_selector.bad.yml",
errMsg: "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
}, {
},
{
filename: "kubernetes_bearertoken_basicauth.bad.yml",
errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
}, {
},
{
filename: "kubernetes_authorization_basicauth.bad.yml",
errMsg: "at most one of basic_auth, oauth2 & authorization must be configured",
}, {
},
{
filename: "marathon_no_servers.bad.yml",
errMsg: "marathon_sd: must contain at least one Marathon server",
}, {
},
{
filename: "marathon_authtoken_authtokenfile.bad.yml",
errMsg: "marathon_sd: at most one of auth_token & auth_token_file must be configured",
}, {
},
{
filename: "marathon_authtoken_basicauth.bad.yml",
errMsg: "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
}, {
},
{
filename: "marathon_authtoken_bearertoken.bad.yml",
errMsg: "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
}, {
},
{
filename: "marathon_authtoken_authorization.bad.yml",
errMsg: "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
}, {
},
{
filename: "openstack_role.bad.yml",
errMsg: "unknown OpenStack SD role",
}, {
},
{
filename: "openstack_availability.bad.yml",
errMsg: "unknown availability invalid, must be one of admin, internal or public",
}, {
},
{
filename: "url_in_targetgroup.bad.yml",
errMsg: "\"http://bad\" is not a valid hostname",
}, {
},
{
filename: "target_label_missing.bad.yml",
errMsg: "relabel configuration for replace action requires 'target_label' value",
}, {
},
{
filename: "target_label_hashmod_missing.bad.yml",
errMsg: "relabel configuration for hashmod action requires 'target_label' value",
}, {
},
{
filename: "unknown_global_attr.bad.yml",
errMsg: "field nonexistent_field not found in type config.plain",
}, {
},
{
filename: "remote_read_url_missing.bad.yml",
errMsg: `url for remote_read is empty`,
}, {
},
{
filename: "remote_write_header.bad.yml",
errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
}, {
},
{
filename: "remote_read_header.bad.yml",
errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
}, {
},
{
filename: "remote_write_authorization_header.bad.yml",
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
}, {
},
{
filename: "remote_write_url_missing.bad.yml",
errMsg: `url for remote_write is empty`,
}, {
},
{
filename: "remote_write_dup.bad.yml",
errMsg: `found multiple remote write configs with job name "queue1"`,
}, {
},
{
filename: "remote_read_dup.bad.yml",
errMsg: `found multiple remote read configs with job name "queue1"`,
},

View file

@ -63,13 +63,11 @@ const (
ec2LabelSeparator = ","
)
var (
// DefaultEC2SDConfig is the default EC2 SD configuration.
DefaultEC2SDConfig = EC2SDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
)
// DefaultEC2SDConfig is the default EC2 SD configuration.
var DefaultEC2SDConfig = EC2SDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&EC2SDConfig{})

View file

@ -53,13 +53,11 @@ const (
lightsailLabelSeparator = ","
)
var (
// DefaultLightsailSDConfig is the default Lightsail SD configuration.
DefaultLightsailSDConfig = LightsailSDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
)
// DefaultLightsailSDConfig is the default Lightsail SD configuration.
var DefaultLightsailSDConfig = LightsailSDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
func init() {
discovery.RegisterConfig(&LightsailSDConfig{})

View file

@ -339,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
// Get the IP address information via separate call to the network provider.
for _, nicID := range vm.NetworkInterfaces {
networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
if err != nil {
level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err)
ch <- target{labelSet: nil, err: err}
@ -437,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM
func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
var vms []virtualMachine
//TODO do we really need to fetch the resourcegroup this way?
// TODO do we really need to fetch the resourcegroup this way?
r, err := newAzureResourceFromID(*scaleSet.ID, nil)
if err != nil {
return nil, errors.Wrap(err, "could not parse scale set ID")
}

View file

@ -54,7 +54,7 @@ const (
healthLabel = model.MetaLabelPrefix + "consul_health"
// serviceAddressLabel is the name of the label containing the (optional) service address.
serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
//servicePortLabel is the name of the label containing the service port.
// servicePortLabel is the name of the label containing the service port.
servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
// datacenterLabel is the name of the label containing the datacenter ID.
datacenterLabel = model.MetaLabelPrefix + "consul_dc"
@ -530,7 +530,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
for _, serviceNode := range serviceNodes {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
// If the service address is not empty it should be used instead of the node address
// since the service may be registered remotely through a different node.

View file

@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
func TestConfiguredService(t *testing.T) {
conf := &SDConfig{
Services: []string{"configuredServiceName"}}
Services: []string{"configuredServiceName"},
}
consulDiscovery, err := NewDiscovery(conf, nil)
if err != nil {
t.Errorf("Unexpected error when initializing discovery %v", err)
}
@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) {
ServiceTags: []string{"http"},
}
consulDiscovery, err := NewDiscovery(conf, nil)
if err != nil {
t.Errorf("Unexpected error when initializing discovery %v", err)
}
@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
for _, tc := range cases {
consulDiscovery, err := NewDiscovery(tc.conf, nil)
if err != nil {
t.Errorf("Unexpected error when initializing discovery %v", err)
}
@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
func TestNonConfiguredService(t *testing.T) {
conf := &SDConfig{}
consulDiscovery, err := NewDiscovery(conf, nil)
if err != nil {
t.Errorf("Unexpected error when initializing discovery %v", err)
}

View file

@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() {
panic(err)
}
}
fmt.Fprint(w, []string{`
fmt.Fprint(w, []string{
`
{
"droplets": [
{

View file

@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string {
}
// copyFileTo atomically copies a file with a different name to the runner's directory.
func (t *testRunner) copyFileTo(src string, name string) string {
func (t *testRunner) copyFileTo(src, name string) string {
t.Helper()
newf, err := ioutil.TempFile(t.dir, "")
@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string {
}
// writeString writes atomically a string to a file.
func (t *testRunner) writeString(file string, data string) {
func (t *testRunner) writeString(file, data string) {
t.Helper()
newf, err := ioutil.TempFile(t.dir, "")
@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) {
},
{
Source: fileSource(sdFile, 1),
}},
},
},
)
}

View file

@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
)
return d, nil
}
func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
servers, err := d.client.Server.All(ctx)
if err != nil {

View file

@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() {
})
}
const robotTestUsername = "my-hetzner"
const robotTestPassword = "my-password"
const (
robotTestUsername = "my-hetzner"
robotTestPassword = "my-password"
)
// HandleRobotServers mocks the robot servers list endpoint.
func (m *SDMock) HandleRobotServers() {

View file

@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
return d, nil
}
func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
if err != nil {

View file

@ -60,7 +60,6 @@ func TestHTTPValidRefresh(t *testing.T) {
},
}
require.Equal(t, tgs, expectedTargets)
}
func TestHTTPInvalidCode(t *testing.T) {
@ -398,5 +397,4 @@ func TestSourceDisappeared(t *testing.T) {
require.Equal(t, test.expectedTargets[i], tgs)
}
}
}

View file

@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
clientGoRequestLatencyMetricVec,
)
}
func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
}
func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
}
@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist
func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
}
func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
}
func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
}
func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
}
func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
}
func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
}
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
// Retries are not used so the metric is omitted.
return noopMetric{}

View file

@ -27,7 +27,7 @@ import (
)
func makeEndpoints() *v1.Endpoints {
var nodeName = "foobar"
nodeName := "foobar"
return &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",

View file

@ -171,13 +171,15 @@ type hasSynced interface {
hasSynced() bool
}
var _ hasSynced = &Discovery{}
var _ hasSynced = &Node{}
var _ hasSynced = &Endpoints{}
var _ hasSynced = &EndpointSlice{}
var _ hasSynced = &Ingress{}
var _ hasSynced = &Pod{}
var _ hasSynced = &Service{}
var (
_ hasSynced = &Discovery{}
_ hasSynced = &Node{}
_ hasSynced = &Endpoints{}
_ hasSynced = &EndpointSlice{}
_ hasSynced = &Ingress{}
_ hasSynced = &Pod{}
_ hasSynced = &Service{}
)
func (d *Discovery) hasSynced() bool {
d.RLock()

View file

@ -25,7 +25,7 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
)
func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {
func makeNode(name, address string, labels, annotations map[string]string) *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,

View file

@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) {
// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
// Final targets array is ordered alphabetically by the name of the discoverer.
// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
{
Source: "tp1_group2",
Targets: []model.LabelSet{{"__instance__": "2"}},
}},
},
},
},
},
},
@ -729,14 +729,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
match := false
var mergedTargets string
for _, targetGroup := range tSets[poolKey] {
for _, l := range targetGroup.Targets {
mergedTargets = mergedTargets + " " + l.String()
if l.String() == label {
match = true
}
}
}
if match != present {
msg := ""
@ -926,7 +924,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
if failedCount != 0 {
t.Fatalf("Expected to get no failed config, got: %v", failedCount)
}
}
func TestCoordinationWithReceiver(t *testing.T) {

View file

@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) {
// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
// Final targets array is ordered alphabetically by the name of the discoverer.
// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
{
Source: "tp1_group2",
Targets: []model.LabelSet{{"__instance__": "2"}},
}},
},
},
},
},
},
@ -719,7 +719,7 @@ func staticConfig(addrs ...string) StaticConfig {
return cfg
}
func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) {
func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) {
t.Helper()
if _, ok := tGroups[key]; !ok {
t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
@ -734,7 +734,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
match = true
}
}
}
if match != present {
msg := ""
@ -755,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
match := false
var mergedTargets string
for _, targetGroup := range tSets[poolKey] {
for _, l := range targetGroup.Targets {
mergedTargets = mergedTargets + " " + l.String()
if l.String() == label {
match = true
}
}
}
if match != present {
msg := ""
@ -1062,7 +1059,6 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
}
}
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@ -1179,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
if failedCount != 0 {
t.Fatalf("Expected to get no failed config, got: %v", failedCount)
}
}
func TestCoordinationWithReceiver(t *testing.T) {

View file

@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet {
// Generate a target endpoint string in host:port format.
func targetEndpoint(task *task, port uint32, containerNet bool) string {
var host string
// Use the task's ipAddress field when it's in a container network
@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
// Get a list of ports and a list of labels from a PortMapping.
func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) {
ports := make([]uint32, len(portMappings))
labels := make([]map[string]string, len(portMappings))

View file

@ -60,9 +60,7 @@ func TestMarathonSDHandleError(t *testing.T) {
}
func TestMarathonSDEmptyList(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -99,11 +97,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList {
}
func TestMarathonSDSendGroup(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppList(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppList(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -195,11 +191,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks
}
func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -254,11 +248,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int)
}
func TestMarathonZeroTaskPorts(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -331,11 +323,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas
}
func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -403,11 +393,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string
}
func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -470,11 +458,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a
}
func TestMarathonSDSendGroupWithPorts(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -546,11 +532,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn
}
func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -622,11 +606,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string
}
func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)
@ -702,11 +684,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st
}
func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
var (
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
}
)
client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
}
tgs, err := testUpdateServices(client)
if err != nil {
t.Fatalf("Got error: %s", err)

View file

@ -51,8 +51,10 @@ type HypervisorDiscovery struct {
// newHypervisorDiscovery returns a new hypervisor discovery.
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery {
return &HypervisorDiscovery{provider: provider, authOpts: opts,
region: region, port: port, availability: availability, logger: l}
return &HypervisorDiscovery{
provider: provider, authOpts: opts,
region: region, port: port, availability: availability, logger: l,
}
}
func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

View file

@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro
}
func TestOpenstackSDHypervisorRefresh(t *testing.T) {
mock := &OpenstackSDHypervisorTestSuite{}
mock.SetupTest(t)

View file

@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
if l == nil {
l = log.NewNopLogger()
}
return &InstanceDiscovery{provider: provider, authOpts: opts,
region: region, port: port, allTenants: allTenants, availability: availability, logger: l}
return &InstanceDiscovery{
provider: provider, authOpts: opts,
region: region, port: port, allTenants: allTenants, availability: availability, logger: l,
}
}
type floatingIPKey struct {

View file

@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error)
}
func TestOpenstackSDInstanceRefresh(t *testing.T) {
mock := &OpenstackSDInstanceTestSuite{}
mock.SetupTest(t)

View file

@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) {
}
}
func testHeader(t *testing.T, r *http.Request, header string, expected string) {
func testHeader(t *testing.T, r *http.Request, header, expected string) {
if actual := r.Header.Get(header); expected != actual {
t.Errorf("Header %s = %s, expected %s", header, actual, expected)
}

View file

@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) {
time.Duration(conf.RefreshInterval),
r.refresh,
), nil
}
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {

View file

@ -173,8 +173,7 @@ func init() {
// Discovery periodically performs Scaleway requests. It implements
// the Discoverer interface.
type Discovery struct {
}
type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
r, err := newRefresher(conf)

View file

@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
expectedReply: nil,
expectedGroup: Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}},
{"__address__": "localhost:9091"},
}, Labels: model.LabelSet{"my": "label"}},
},
{
json: ` {"label": {},"targets": []}`,
@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg)
}
}
func TestTargetGroupYamlMarshal(t *testing.T) {
@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
},
{
// targets only exposes addresses.
group: Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9091"}},
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}},
group: Group{
Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9091"},
},
Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
},
expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n",
expectedErr: nil,
},
@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
expectedReply: nil,
expectedGroup: Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}},
{"__address__": "localhost:9191"},
}, Labels: model.LabelSet{"my": "label"}},
},
{
// incorrect syntax.
@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg)
}
}
func TestString(t *testing.T) {
// String() should return only the source, regardless of other attributes.
group1 :=
Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9091"}},
Group{
Targets: []model.LabelSet{
{"__address__": "localhost:9090"},
{"__address__": "localhost:9091"},
},
Source: "<source>",
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}
Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
}
group2 :=
Group{Targets: []model.LabelSet{},
Source: "<source>",
Labels: model.LabelSet{}}
Group{
Targets: []model.LabelSet{},
Source: "<source>",
Labels: model.LabelSet{},
}
require.Equal(t, "<source>", group1.String())
require.Equal(t, "<source>", group2.String())
require.Equal(t, group1.String(), group2.String())

View file

@ -190,7 +190,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
default:
return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
}
var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
if len(d.sdConfig.Groups) > 0 {
groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ","))
endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)

View file

@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) {
}
func TestTritonSDRefreshMultipleTargets(t *testing.T) {
var (
dstr = `{"containers":[
dstr := `{"containers":[
{
"groups":["foo","bar","baz"],
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
"vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7"
}]
}`
)
tgts := testTritonSDRefresh(t, conf, dstr)
require.NotNil(t, tgts)
@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
}
func TestTritonSDRefreshNoServer(t *testing.T) {
var (
td, _ = newTritonDiscovery(conf)
)
td, _ := newTritonDiscovery(conf)
_, err := td.refresh(context.Background())
require.Error(t, err)
@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
}
func TestTritonSDRefreshCancelled(t *testing.T) {
var (
td, _ = newTritonDiscovery(conf)
)
td, _ := newTritonDiscovery(conf)
ctx, cancel := context.WithCancel(context.Background())
cancel()
@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
}
func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
var (
dstr = `{"cns":[
dstr := `{"cns":[
{
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131"
},
@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6"
}]
}`
)
tgts := testTritonSDRefresh(t, cnconf, dstr)
require.NotNil(t, tgts)
@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
}
func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
var (
dstr = `{"cns":[
dstr := `{"cns":[
{
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
"server_hostname": "server01"
@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
"server_hostname": "server02"
}]
}`
)
tgts := testTritonSDRefresh(t, cnconf, dstr)
require.NotNil(t, tgts)

View file

@ -119,7 +119,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
@ -141,7 +140,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) {
func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) {
var result string
err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
return result, err
@ -151,7 +150,7 @@ func logout(rpcclient *xmlrpc.Client, token string) error {
return rpcclient.Call("auth.logout", token, nil)
}
func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) {
func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) {
var systemGroupsInfos []struct {
SystemID int `xmlrpc:"id"`
SystemGroups []systemGroupID `xmlrpc:"system_groups"`
@ -234,7 +233,6 @@ func (d *Discovery) getEndpointLabels(
systemGroupIDs []systemGroupID,
networkInfo networkInfo,
) model.LabelSet {
var addr, scheme string
managedGroupNames := getSystemGroupNames(systemGroupIDs)
addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port)
@ -274,7 +272,6 @@ func (d *Discovery) getTargetsForSystems(
token string,
entitlement string,
) ([]model.LabelSet, error) {
result := make([]model.LabelSet, 0)
systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement)

View file

@ -26,22 +26,19 @@ import (
"google.golang.org/protobuf/types/known/anypb"
)
var (
httpResourceConf = &HTTPResourceClientConfig{
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
},
ResourceType: "monitoring",
// Some known type.
ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest",
Server: "http://localhost",
ClientID: "test-id",
}
)
var httpResourceConf = &HTTPResourceClientConfig{
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
},
ResourceType: "monitoring",
// Some known type.
ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest",
Server: "http://localhost",
ClientID: "test-id",
}
func urlMustParse(str string) *url.URL {
parsed, err := url.Parse(str)
if err != nil {
panic(err)
}
@ -92,7 +89,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) {
require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
require.Equal(t, client.client.Timeout, 1*time.Minute)
}
func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {

View file

@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis
serialized := make([]*anypb.Any, len(resources))
for i, res := range resources {
data, err := proto.Marshal(res)
if err != nil {
return nil, err
}

View file

@ -50,7 +50,7 @@ var (
tagsLabel = model.MetaLabelPrefix + "consul_tags"
// serviceAddressLabel is the name of the label containing the (optional) service address.
serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
//servicePortLabel is the name of the label containing the service port.
// servicePortLabel is the name of the label containing the service port.
servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
// serviceIDLabel is the name of the label containing the service ID.
serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"
@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
for _, node := range nodes {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
var tags = "," + strings.Join(node.ServiceTags, ",") + ","
tags := "," + strings.Join(node.ServiceTags, ",") + ","
// If the service address is not empty it should be used instead of the node address
// since the service may be registered remotely through a different node.
@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; {
var srvs map[string][]string
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
if err != nil {
level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second)

View file

@ -163,7 +163,7 @@ func (a *Adapter) Run() {
}
// NewAdapter creates a new instance of Adapter.
func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
return &Adapter{
ctx: ctx,
disc: d,

View file

@ -37,7 +37,7 @@ type Client struct {
}
// NewClient creates a new Client.
func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client {
func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
if logger == nil {
logger = log.NewNopLogger()
}

View file

@ -20,13 +20,11 @@ import (
"github.com/stretchr/testify/require"
)
var (
metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
}
)
var metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
}
func TestEscape(t *testing.T) {
// Can we correctly keep and escape valid chars.

View file

@ -41,7 +41,7 @@ type Client struct {
}
// NewClient creates a new Client.
func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client {
func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client {
c, err := influx.NewHTTPClient(conf)
// Currently influx.NewClient() *should* never return an error.
if err != nil {

View file

@ -21,13 +21,11 @@ import (
"github.com/stretchr/testify/require"
)
var (
metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./",
}
)
var metric = model.Metric{
model.MetricNameLabel: "test:metric",
"testlabel": "test:value",
"many_chars": "abc!ABC:012-3!45ö67~89./",
}
func TestTagsFromMetric(t *testing.T) {
expected := map[string]TagValue{

View file

@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert {
// Run dispatches notifications continuously.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
for {
select {
case <-n.ctx.Done():

View file

@ -40,7 +40,7 @@ import (
)
func TestPostPath(t *testing.T) {
var cases = []struct {
cases := []struct {
in, out string
}{
{
@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) {
}
func TestReload(t *testing.T) {
var tests = []struct {
tests := []struct {
in *targetgroup.Group
out string
}{
@ -500,11 +500,10 @@ alerting:
require.Equal(t, tt.out, res)
}
}
func TestDroppedAlertmanagers(t *testing.T) {
var tests = []struct {
tests := []struct {
in *targetgroup.Group
out string
}{

View file

@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
lbls: func() Labels {
lbls := make(Labels, 10)
for i := 0; i < len(lbls); i++ {
//Label ~50B name, 50B value.
// Label ~50B name, 50B value.
lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
}
return lbls

View file

@ -21,11 +21,9 @@ import (
"github.com/pkg/errors"
)
var (
timestampFormat = log.TimestampFormat(
func() time.Time { return time.Now().UTC() },
"2006-01-02T15:04:05.000Z07:00",
)
var timestampFormat = log.TimestampFormat(
func() time.Time { return time.Now().UTC() },
"2006-01-02T15:04:05.000Z07:00",
)
// JSONFileLogger represents a logger that writes JSON to a file.
@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
return nil, nil
}
f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
if err != nil {
return nil, errors.Wrap(err, "can't create json logger")
}

View file

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only)
func Statfs(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[int64]string{
0xadf5: "ADFS_SUPER_MAGIC",

View file

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only)
func Statfs(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[int32]string{
0xadf5: "ADFS_SUPER_MAGIC",

View file

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only)
func Statfs(path string) string {
// Types of file systems that may be returned by `statfs`
fsTypes := map[uint32]string{
0xadf5: "ADFS_SUPER_MAGIC",

View file

@ -457,9 +457,7 @@ func BenchmarkParse(b *testing.B) {
total := 0
for i := 0; i < b.N; i += promtestdataSampleCount {
var (
decSamples = make(model.Vector, 0, 50)
)
decSamples := make(model.Vector, 0, 50)
sdec := expfmt.SampleDecoder{
Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText),
Opts: &expfmt.DecodeOptions{
@ -480,6 +478,7 @@ func BenchmarkParse(b *testing.B) {
}
}
}
func BenchmarkGzip(b *testing.B) {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
b.Run(fn, func(b *testing.B) {

View file

@ -87,12 +87,15 @@ type (
func (e ErrQueryTimeout) Error() string {
return fmt.Sprintf("query timed out in %s", string(e))
}
func (e ErrQueryCanceled) Error() string {
return fmt.Sprintf("query was canceled in %s", string(e))
}
func (e ErrTooManySamples) Error() string {
return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e))
}
func (e ErrStorage) Error() string {
return e.Err.Error()
}
@ -402,8 +405,10 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim
return qry, nil
}
var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
var (
ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
)
func (ng *Engine) validateOpts(expr parser.Expr) error {
if ng.enableAtModifier && ng.enableNegativeOffset {
@ -2139,7 +2144,6 @@ type groupedAggregation struct {
// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
// must be sorted.
func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
result := map[uint64]*groupedAggregation{}
orderedResult := []*groupedAggregation{}
var k int64
@ -2509,7 +2513,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
}
if isStepInvariant {
// The function and all arguments are step invariant.
return true
}
@ -2559,7 +2562,6 @@ func newStepInvariantExpr(expr parser.Expr) parser.Expr {
// Wrapping the inside of () makes it easy to unwrap the paren later.
// But this effectively unwraps the paren.
return newStepInvariantExpr(e.Expr)
}
return &parser.StepInvariantExpr{Expr: expr}
}

File diff suppressed because it is too large

View file

@ -56,7 +56,7 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
// It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall.
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector {
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector {
ms := args[0].(*parser.MatrixSelector)
vs := ms.VectorSelector.(*parser.VectorSelector)
var (


@ -407,7 +407,7 @@ type PositionRange struct {
// mergeRanges is a helper function to merge the PositionRanges of two Nodes.
// Note that the arguments must be in the same order as they
// occur in the input string.
func mergeRanges(first Node, last Node) PositionRange {
func mergeRanges(first, last Node) PositionRange {
return PositionRange{
Start: first.PositionRange().Start,
End: last.PositionRange().End,
@ -426,15 +426,19 @@ func (i *Item) PositionRange() PositionRange {
func (e *AggregateExpr) PositionRange() PositionRange {
return e.PosRange
}
func (e *BinaryExpr) PositionRange() PositionRange {
return mergeRanges(e.LHS, e.RHS)
}
func (e *Call) PositionRange() PositionRange {
return e.PosRange
}
func (e *EvalStmt) PositionRange() PositionRange {
return e.Expr.PositionRange()
}
func (e Expressions) PositionRange() PositionRange {
if len(e) == 0 {
// Position undefined.
@ -445,33 +449,40 @@ func (e Expressions) PositionRange() PositionRange {
}
return mergeRanges(e[0], e[len(e)-1])
}
func (e *MatrixSelector) PositionRange() PositionRange {
return PositionRange{
Start: e.VectorSelector.PositionRange().Start,
End: e.EndPos,
}
}
func (e *SubqueryExpr) PositionRange() PositionRange {
return PositionRange{
Start: e.Expr.PositionRange().Start,
End: e.EndPos,
}
}
func (e *NumberLiteral) PositionRange() PositionRange {
return e.PosRange
}
func (e *ParenExpr) PositionRange() PositionRange {
return e.PosRange
}
func (e *StringLiteral) PositionRange() PositionRange {
return e.PosRange
}
func (e *UnaryExpr) PositionRange() PositionRange {
return PositionRange{
Start: e.StartPos,
End: e.Expr.PositionRange().End,
}
}
func (e *VectorSelector) PositionRange() PositionRange {
return e.PosRange
}


@ -33,82 +33,84 @@ type yySymType struct {
duration time.Duration
}
const EQL = 57346
const BLANK = 57347
const COLON = 57348
const COMMA = 57349
const COMMENT = 57350
const DURATION = 57351
const EOF = 57352
const ERROR = 57353
const IDENTIFIER = 57354
const LEFT_BRACE = 57355
const LEFT_BRACKET = 57356
const LEFT_PAREN = 57357
const METRIC_IDENTIFIER = 57358
const NUMBER = 57359
const RIGHT_BRACE = 57360
const RIGHT_BRACKET = 57361
const RIGHT_PAREN = 57362
const SEMICOLON = 57363
const SPACE = 57364
const STRING = 57365
const TIMES = 57366
const operatorsStart = 57367
const ADD = 57368
const DIV = 57369
const EQLC = 57370
const EQL_REGEX = 57371
const GTE = 57372
const GTR = 57373
const LAND = 57374
const LOR = 57375
const LSS = 57376
const LTE = 57377
const LUNLESS = 57378
const MOD = 57379
const MUL = 57380
const NEQ = 57381
const NEQ_REGEX = 57382
const POW = 57383
const SUB = 57384
const AT = 57385
const ATAN2 = 57386
const operatorsEnd = 57387
const aggregatorsStart = 57388
const AVG = 57389
const BOTTOMK = 57390
const COUNT = 57391
const COUNT_VALUES = 57392
const GROUP = 57393
const MAX = 57394
const MIN = 57395
const QUANTILE = 57396
const STDDEV = 57397
const STDVAR = 57398
const SUM = 57399
const TOPK = 57400
const aggregatorsEnd = 57401
const keywordsStart = 57402
const BOOL = 57403
const BY = 57404
const GROUP_LEFT = 57405
const GROUP_RIGHT = 57406
const IGNORING = 57407
const OFFSET = 57408
const ON = 57409
const WITHOUT = 57410
const keywordsEnd = 57411
const preprocessorStart = 57412
const START = 57413
const END = 57414
const preprocessorEnd = 57415
const startSymbolsStart = 57416
const START_METRIC = 57417
const START_SERIES_DESCRIPTION = 57418
const START_EXPRESSION = 57419
const START_METRIC_SELECTOR = 57420
const startSymbolsEnd = 57421
const (
EQL = 57346
BLANK = 57347
COLON = 57348
COMMA = 57349
COMMENT = 57350
DURATION = 57351
EOF = 57352
ERROR = 57353
IDENTIFIER = 57354
LEFT_BRACE = 57355
LEFT_BRACKET = 57356
LEFT_PAREN = 57357
METRIC_IDENTIFIER = 57358
NUMBER = 57359
RIGHT_BRACE = 57360
RIGHT_BRACKET = 57361
RIGHT_PAREN = 57362
SEMICOLON = 57363
SPACE = 57364
STRING = 57365
TIMES = 57366
operatorsStart = 57367
ADD = 57368
DIV = 57369
EQLC = 57370
EQL_REGEX = 57371
GTE = 57372
GTR = 57373
LAND = 57374
LOR = 57375
LSS = 57376
LTE = 57377
LUNLESS = 57378
MOD = 57379
MUL = 57380
NEQ = 57381
NEQ_REGEX = 57382
POW = 57383
SUB = 57384
AT = 57385
ATAN2 = 57386
operatorsEnd = 57387
aggregatorsStart = 57388
AVG = 57389
BOTTOMK = 57390
COUNT = 57391
COUNT_VALUES = 57392
GROUP = 57393
MAX = 57394
MIN = 57395
QUANTILE = 57396
STDDEV = 57397
STDVAR = 57398
SUM = 57399
TOPK = 57400
aggregatorsEnd = 57401
keywordsStart = 57402
BOOL = 57403
BY = 57404
GROUP_LEFT = 57405
GROUP_RIGHT = 57406
IGNORING = 57407
OFFSET = 57408
ON = 57409
WITHOUT = 57410
keywordsEnd = 57411
preprocessorStart = 57412
START = 57413
END = 57414
preprocessorEnd = 57415
startSymbolsStart = 57416
START_METRIC = 57417
START_SERIES_DESCRIPTION = 57418
START_EXPRESSION = 57419
START_METRIC_SELECTOR = 57420
startSymbolsEnd = 57421
)
var yyToknames = [...]string{
"$end",
@ -194,9 +196,11 @@ var yyToknames = [...]string{
var yyStatenames = [...]string{}
const yyEofCode = 1
const yyErrCode = 2
const yyInitialStackSize = 16
const (
yyEofCode = 1
yyErrCode = 2
yyInitialStackSize = 16
)
//line promql/parser/generated_parser.y:749

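The generated-parser hunks above fold long runs of single-constant declarations into one parenthesized const block. A minimal sketch of that grouped form, reusing a few of the token values shown above but with illustrative names:

package main

import "fmt"

// Adjacent single-constant declarations collapse into one grouped block.
const (
	tokenEOF    = 57352
	tokenError  = 57353
	tokenNumber = 57359
)

func main() {
	fmt.Println(tokenEOF, tokenError, tokenNumber)
}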

@ -318,25 +318,32 @@ var tests = []struct {
{
input: "offset",
expected: []Item{{OFFSET, 0, "offset"}},
}, {
},
{
input: "by",
expected: []Item{{BY, 0, "by"}},
}, {
},
{
input: "without",
expected: []Item{{WITHOUT, 0, "without"}},
}, {
},
{
input: "on",
expected: []Item{{ON, 0, "on"}},
}, {
},
{
input: "ignoring",
expected: []Item{{IGNORING, 0, "ignoring"}},
}, {
},
{
input: "group_left",
expected: []Item{{GROUP_LEFT, 0, "group_left"}},
}, {
},
{
input: "group_right",
expected: []Item{{GROUP_RIGHT, 0, "group_right"}},
}, {
},
{
input: "bool",
expected: []Item{{BOOL, 0, "bool"}},
},
@ -569,7 +576,8 @@ var tests = []struct {
{DURATION, 24, `4s`},
{RIGHT_BRACKET, 26, `]`},
},
}, {
},
{
input: `test:name{on!~"b:ar"}[4m:4s]`,
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
@ -584,7 +592,8 @@ var tests = []struct {
{DURATION, 25, `4s`},
{RIGHT_BRACKET, 27, `]`},
},
}, {
},
{
input: `test:name{on!~"b:ar"}[4m:]`,
expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`},
@ -598,7 +607,8 @@ var tests = []struct {
{COLON, 24, `:`},
{RIGHT_BRACKET, 25, `]`},
},
}, { // Nested Subquery.
},
{ // Nested Subquery.
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`,
expected: []Item{
@ -646,7 +656,8 @@ var tests = []struct {
{OFFSET, 29, "offset"},
{DURATION, 36, "10m"},
},
}, {
},
{
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`,
expected: []Item{
@ -737,7 +748,6 @@ func TestLexer(t *testing.T) {
if item.Typ == ERROR {
hasError = true
}
}
if !hasError {
t.Logf("%d: input %q", i, test.input)

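In the lexer test table above the change is layout only: cases that previously shared a line as `}, {` are reflowed so each case literal starts on its own line. A simplified sketch of the resulting table shape (the expected values here are made up; the real table holds Item slices):

package main

import "fmt"

// Each test case literal starts on its own line instead of sharing "}, {".
var cases = []struct {
	input    string
	expected int
}{
	{
		input:    "offset",
		expected: 1,
	},
	{
		input:    "by",
		expected: 2,
	},
}

func main() {
	for _, c := range cases {
		fmt.Println(c.input, c.expected)
	}
}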

@ -241,7 +241,7 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) {
// unexpected creates a parser error complaining about an unexpected lexer item.
// The item that is presented as unexpected is always the last item produced
// by the lexer.
func (p *parser) unexpected(context string, expected string) {
func (p *parser) unexpected(context, expected string) {
var errMsg strings.Builder
// Do not report lexer errors twice
@ -354,7 +354,8 @@ func (p *parser) InjectItem(typ ItemType) {
p.inject = typ
p.injecting = true
}
func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr {
func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
ret := modifiers.(*BinaryExpr)
ret.LHS = lhs.(Expr)
@ -374,7 +375,7 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) {
}
}
func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) {
func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) {
ret = modifier.(*AggregateExpr)
arguments := args.(Expressions)
@ -650,10 +651,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
p.yyParser.Parse(p)
return p.generatedParserResult
}
func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher {
func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher {
op := operator.Typ
val := p.unquoteString(value.Val)
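The parser hunks above merge adjacent parameters that share a type, for example `unexpected(context string, expected string)` becoming `unexpected(context, expected string)`. A minimal sketch of the same rewrite on a hypothetical helper:

package main

import "fmt"

// Before the rewrite: func describe(context string, expected string, pos int) string
// After adjacent parameters of the same type are merged:
func describe(context, expected string, pos int) string {
	return fmt.Sprintf("%s: expected %s at %d", context, expected, pos)
}

func main() {
	fmt.Println(describe("grouping", "label", 7))
}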

File diff suppressed because it is too large.


@ -81,8 +81,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
}
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666)
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil {
level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err)
return nil, err
@ -104,7 +103,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er
}
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
err := os.MkdirAll(localStoragePath, 0777)
err := os.MkdirAll(localStoragePath, 0o777)
if err != nil {
level.Error(logger).Log("msg", "Failed to create directory for logging active queries")
}
@ -147,7 +146,6 @@ func trimStringByBytes(str string, size int) string {
func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte {
entry := Entry{query, timestamp}
jsonEntry, err := json.Marshal(entry)
if err != nil {
level.Error(logger).Log("msg", "Cannot create json of query", "query", query)
return []byte{}

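The query-logger hunk above respells file-mode literals with the 0o octal prefix that Go 1.13 introduced; the permission bits themselves do not change. A small, self-contained sketch using temporary, illustrative paths:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := filepath.Join(os.TempDir(), "active-queries")
	// 0o777 and 0o666 carry the same permission bits as 0777 and 0666;
	// only the spelling of the octal literal changes.
	if err := os.MkdirAll(dir, 0o777); err != nil {
		fmt.Println("mkdir:", err)
		return
	}
	f, err := os.OpenFile(filepath.Join(dir, "queries.active"), os.O_CREATE|os.O_RDWR, 0o666)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()
	fmt.Println("created", f.Name())
}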

@ -311,7 +311,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
resultFPs := map[uint64]struct{}{}
var vec promql.Vector
var alerts = make(map[uint64]*Alert, len(res))
alerts := make(map[uint64]*Alert, len(res))
for _, smpl := range res {
// Provide the alert information to the template.
l := make(map[string]string, len(smpl.Metric))
@ -479,7 +479,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
}
}
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) {
alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) {
if alert.needsSending(ts, resendDelay) {


@ -834,12 +834,10 @@ func (g *Group) RestoreForState(ts time.Time) {
level.Debug(g.logger).Log("msg", "'for' state restored",
labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
"labels", a.Labels.String())
})
alertRule.SetRestored(true)
}
}
// Equals return if two groups are the same.


@ -119,17 +119,19 @@ func TestAlertingRule(t *testing.T) {
baseTime := time.Unix(0, 0)
var tests = []struct {
tests := []struct {
time time.Duration
result promql.Vector
}{
{
time: 0,
result: result[:2],
}, {
},
{
time: 5 * time.Minute,
result: result[2:],
}, {
},
{
time: 10 * time.Minute,
result: result[2:3],
},
@ -256,7 +258,7 @@ func TestForStateAddSamples(t *testing.T) {
baseTime := time.Unix(0, 0)
var tests = []struct {
tests := []struct {
time time.Duration
result promql.Vector
persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
@ -769,7 +771,6 @@ func TestUpdate(t *testing.T) {
} else {
rgs.Groups[i].Interval = model.Duration(10)
}
}
reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)


@ -109,7 +109,6 @@ func TestDroppedTargetsList(t *testing.T) {
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {
sp := &scrapePool{}
// These are used when syncing so need this to avoid a panic.
sp.config = &config.ScrapeConfig{
@ -350,7 +349,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
client: http.DefaultClient,
}
var tgs = []*targetgroup.Group{}
tgs := []*targetgroup.Group{}
for i := 0; i < 50; i++ {
tgs = append(tgs,
&targetgroup.Group{
@ -1000,6 +999,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
_, _, _, _ = sl.append(slApp, metrics, "", ts)
}
}
func BenchmarkScrapeLoopAppendOM(b *testing.B) {
ctx, sl := simpleTestScrapeLoop(b)
@ -1409,8 +1409,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
"Two target labels collide with existing labels, both with and without prefix 'exported'": {
targetLabels: []string{"foo", "3", "exported_foo", "4"},
exposedLabels: `metric{foo="1" exported_foo="2"} 0`,
expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo",
"2", "exported_foo", "4", "foo", "3"},
expected: []string{
"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo",
"2", "exported_foo", "4", "foo", "3",
},
},
"Extreme example": {
targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"},
@ -1743,7 +1745,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1},
},
}, {
},
{
title: "Metric with exemplars and TS",
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
discoveryLabels: []string{"n", "2"},
@ -1754,7 +1757,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
},
}, {
},
{
title: "Two metrics and exemplars",
scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000
metric_total{n="2"} 2 # {t="2"} 2.0 20000
@ -2040,7 +2044,6 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 0, seriesAdded)
}
func TestTargetScraperScrapeOK(t *testing.T) {


@ -275,7 +275,7 @@ func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
func (r *sampleRing) samples() []sample {
res := make([]sample, r.l)
var k = r.f + r.l
k := r.f + r.l
var j int
if k > len(r.buf) {
k = len(r.buf)

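The sampleRing hunk above turns `var k = r.f + r.l` into the short form `k := r.f + r.l`, a rewrite that recurs across many of these files. A minimal sketch of the same change on a hypothetical index calculation:

package main

import "fmt"

func lastIndex(first, length, bufLen int) int {
	// Short declaration instead of `var k = first + length`.
	k := first + length
	if k > bufLen {
		k = bufLen
	}
	return k
}

func main() {
	fmt.Println(lastIndex(3, 5, 6))
}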

@ -42,7 +42,7 @@ type mergeGenericQuerier struct {
// See NewFanout commentary to learn more about primary vs secondary differences.
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries {
if _, ok := q.(noopQuerier); !ok && q != nil {
@ -71,7 +71,7 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries {
if _, ok := q.(noopChunkQuerier); !ok && q != nil {
@ -104,7 +104,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche
return q.queriers[0].Select(sortSeries, hints, matchers...)
}
var seriesSets = make([]genericSeriesSet, 0, len(q.queriers))
seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
if !q.concurrentSelect {
for _, querier := range q.queriers {
// We need to sort for merge to work.
@ -265,7 +265,6 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri
genericSets := make([]genericSeriesSet, 0, len(sets))
for _, s := range sets {
genericSets = append(genericSets, &genericSeriesSetAdapter{s})
}
return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)}
}
@ -281,7 +280,6 @@ func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeries
genericSets := make([]genericSeriesSet, 0, len(sets))
for _, s := range sets {
genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s})
}
return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)}
}


@ -616,7 +616,8 @@ func TestChainSampleIterator(t *testing.T) {
NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
},
expected: []tsdbutil.Sample{
sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}},
sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5},
},
},
// Overlap.
{


@ -728,7 +728,7 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) {
// processExternalLabels merges externalLabels into ls. If ls contains
// a label in externalLabels, the value in ls wins.
func processExternalLabels(ls labels.Labels, externalLabels labels.Labels) labels.Labels {
func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels {
i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels))
for i < len(ls) && j < len(externalLabels) {
if ls[i].Name < externalLabels[j].Name {
@ -1048,7 +1048,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface
max += int(float64(max) * 0.1)
}
var pendingData = make([]prompb.TimeSeries, max)
pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData {
pendingData[i].Samples = []prompb.Sample{{}}
if s.qm.sendExemplars {
@ -1142,7 +1142,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface
}
}
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
begin := time.Now()
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf)
if err != nil {
@ -1159,7 +1159,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
}
// sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
// Build the WriteRequest with no metadata.
req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf)
if err != nil {


@ -60,7 +60,6 @@ func newHighestTimestampMetric() *maxTimestamp {
}
func TestSampleDelivery(t *testing.T) {
testcases := []struct {
name string
samples bool
@ -107,7 +106,6 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var (
series []record.RefSeries
samples []record.RefSample
@ -715,7 +713,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
const numSeries = 10000
// Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics.
var extraLabels = labels.Labels{
extraLabels := labels.Labels{
{Name: "kubernetes_io_arch", Value: "amd64"},
{Name: "kubernetes_io_instance_type", Value: "c3.somesize"},
{Name: "kubernetes_io_os", Value: "linux"},


@ -506,7 +506,6 @@ func TestSampleAndChunkQueryableClient(t *testing.T) {
}
require.NoError(t, ss.Err())
require.Equal(t, tc.expectedSeries, got)
})
}
}


@ -81,9 +81,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar,
}
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
var (
outOfOrderExemplarErrs = 0
)
outOfOrderExemplarErrs := 0
app := h.appendable.Appender(ctx)
defer func() {


@ -87,7 +87,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer
// promql.Vector is hard to work with in templates, so convert to
// base data types.
// TODO(fabxc): probably not true anymore after type rework.
var result = make(queryResult, len(vector))
result := make(queryResult, len(vector))
for n, v := range vector {
s := sample{
Value: v.V,
@ -301,7 +301,7 @@ func NewTemplateExpander(
}
// AlertTemplateData returns the interface to be used in expanding the template.
func AlertTemplateData(labels map[string]string, externalLabels map[string]string, externalURL string, value float64) interface{} {
func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, value float64) interface{} {
return struct {
Labels map[string]string
ExternalLabels map[string]string


@ -87,7 +87,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "11",
},
{
@ -98,7 +99,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "a",
},
{
@ -108,7 +110,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "a",
},
{
@ -118,7 +121,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "",
},
{
@ -128,7 +132,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "",
},
{
@ -137,7 +142,8 @@ func TestTemplateExpansion(t *testing.T) {
{
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "",
html: true,
},
@ -151,7 +157,8 @@ func TestTemplateExpansion(t *testing.T) {
}, {
Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
Point: promql.Point{T: 0, V: 11},
}},
},
},
output: "a:11: b:21: ",
},
{


@ -36,9 +36,7 @@ import (
"go.uber.org/atomic"
)
var (
ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
)
var ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
// Default values for options.
var (

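The agent hunk above unwraps a parenthesized var block holding a single specification into a one-line declaration, while multi-value groups such as the options defaults keep their parentheses. A minimal sketch with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// A declaration with a single spec is written on one line rather than
// wrapped in a parenthesized block.
var errUnsupported = errors.New("unsupported operation")

// Parenthesized groups remain where several related values are declared together.
var (
	defaultRetries        = 3
	defaultTimeoutSeconds = 30
)

func main() {
	fmt.Println(errUnsupported, defaultRetries, defaultTimeoutSeconds)
}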

@ -54,7 +54,7 @@ func (m seriesHashmap) Set(hash uint64, s *memSeries) {
m[hash] = append(seriesSet, s)
}
func (m seriesHashmap) Delete(hash uint64, ref uint64) {
func (m seriesHashmap) Delete(hash, ref uint64) {
var rem []*memSeries
for _, s := range m[hash] {
if s.ref != ref {


@ -191,9 +191,11 @@ type BlockMetaCompaction struct {
Failed bool `json:"failed,omitempty"`
}
const indexFilename = "index"
const metaFilename = "meta.json"
const metaVersion1 = 1
const (
indexFilename = "index"
metaFilename = "meta.json"
metaVersion1 = 1
)
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
@ -611,12 +613,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er
// Snapshot creates snapshot of the block into dir.
func (pb *Block) Snapshot(dir string) error {
blockDir := filepath.Join(dir, pb.meta.ULID.String())
if err := os.MkdirAll(blockDir, 0777); err != nil {
if err := os.MkdirAll(blockDir, 0o777); err != nil {
return errors.Wrap(err, "create snapshot block dir")
}
chunksDir := chunkDir(blockDir)
if err := os.MkdirAll(chunksDir, 0777); err != nil {
if err := os.MkdirAll(chunksDir, 0o777); err != nil {
return errors.Wrap(err, "create snapshot chunk dir")
}


@ -185,7 +185,7 @@ func TestCorruptedChunk(t *testing.T) {
require.NoError(t, err)
require.Greater(t, len(files), 0, "No chunk created.")
f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
f, err := os.OpenFile(files[0], os.O_RDWR, 0o666)
require.NoError(t, err)
// Apply corruption function.
@ -505,7 +505,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil)
require.NoError(tb, err)
require.NoError(tb, os.MkdirAll(dir, 0777))
require.NoError(tb, os.MkdirAll(dir, 0o777))
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this block intervals are always +1 than the total samples it includes.


@ -97,9 +97,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
return cm.MinTime <= maxt && mint <= cm.MaxTime
}
var (
errInvalidSize = fmt.Errorf("invalid size")
)
var errInvalidSize = fmt.Errorf("invalid size")
var castagnoliTable *crc32.Table
@ -148,7 +146,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) {
segmentSize = DefaultChunkSegmentSize
}
if err := os.MkdirAll(dir, 0777); err != nil {
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)
@ -224,7 +222,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all
return 0, nil, 0, errors.Wrap(err, "next sequence file")
}
ptmp := p + ".tmp"
f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666)
f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
if err != nil {
return 0, nil, 0, errors.Wrap(err, "open temp file")
}
@ -266,7 +264,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all
return 0, nil, 0, errors.Wrap(err, "replace file")
}
f, err = os.OpenFile(p, os.O_WRONLY, 0666)
f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
if err != nil {
return 0, nil, 0, errors.Wrap(err, "open final file")
}
@ -355,7 +353,7 @@ func (w *Writer) writeChunks(chks []Meta) error {
return nil
}
var seq = uint64(w.seq()) << 32
seq := uint64(w.seq()) << 32
for i := range chks {
chk := &chks[i]


@ -42,11 +42,9 @@ const (
headChunksFormatV1 = 1
)
var (
// ErrChunkDiskMapperClosed returned by any method indicates
// that the ChunkDiskMapper was closed.
ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
)
// ErrChunkDiskMapperClosed returned by any method indicates
// that the ChunkDiskMapper was closed.
var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
const (
// MintMaxtSize is the size of the mint/maxt for head chunk file and chunks.
@ -83,7 +81,6 @@ func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) {
sgmIndex = int(ref >> 32)
chkStart = int((ref << 32) >> 32)
return sgmIndex, chkStart
}
// CorruptionErr is an error that's returned when corruption is encountered.
@ -152,7 +149,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C
return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize)
}
if err := os.MkdirAll(dir, 0777); err != nil {
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
dirFile, err := fileutil.OpenDir(dir)


@ -16,8 +16,6 @@
package chunks
var (
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs pre-allocations while the other OS does not.
HeadChunkFilePreallocationSize int64
)
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs pre-allocations while the other OS does not.
var HeadChunkFilePreallocationSize int64


@ -379,7 +379,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
// Write an empty last file mimicking an abrupt shutdown on file creation.
emptyFileName := segmentFile(dir, lastFile+1)
f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666)
f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0o666)
require.NoError(t, err)
require.NoError(t, f.Sync())
stat, err := f.Stat()
@ -409,7 +409,6 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
require.NoError(t, err)
require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
}
}
func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {


@ -13,8 +13,6 @@
package chunks
var (
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs pre-allocation to m-map the file.
HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize
)
// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
// Windows needs pre-allocation to m-map the file.
var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize


@ -564,7 +564,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
return err
}
if err = os.MkdirAll(tmp, 0777); err != nil {
if err = os.MkdirAll(tmp, 0o777); err != nil {
return err
}


@ -65,10 +65,8 @@ const (
lockfileCreatedCleanly = 1
)
var (
// ErrNotReady is returned if the underlying storage is not ready yet.
ErrNotReady = errors.New("TSDB not ready")
)
// ErrNotReady is returned if the underlying storage is not ready yet.
var ErrNotReady = errors.New("TSDB not ready")
// DefaultOptions used for the DB. They are sane for setups using
// millisecond precision timestamps.
@ -609,7 +607,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
}
func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) {
if err := os.MkdirAll(dir, 0777); err != nil {
if err := os.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
if l == nil {
@ -1642,7 +1640,7 @@ func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, err
return db.head.exemplars.ExemplarQuerier(ctx)
}
func rangeForTimestamp(t int64, width int64) (maxt int64) {
func rangeForTimestamp(t, width int64) (maxt int64) {
return (t/width)*width + width
}


@ -228,7 +228,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
{
walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal"))
require.NoError(t, err)
f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0666)
f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
require.NoError(t, err)
r := wal.NewReader(bufio.NewReader(f))
require.True(t, r.Next(), "reading the series record")
@ -1245,7 +1245,6 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
require.NoError(t, db.Close())
}
}
func intersection(oldBlocks, actualBlocks []string) (intersection []string) {
@ -1272,6 +1271,7 @@ type mockCompactorFailing struct {
func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
return nil, nil
}
func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
if len(c.blocks) >= c.max {
return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
@ -1559,7 +1559,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample
func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
// Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm
// will handle that.
var metas = make([]BlockMeta, 11)
metas := make([]BlockMeta, 11)
for i := 10; i >= 0; i-- {
metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
}
@ -1781,7 +1781,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.NoError(t, os.RemoveAll(dir))
}()
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wal.New(nil, nil, path.Join(dir, "wal"), false)
require.NoError(t, err)
@ -1831,7 +1831,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 1000, 6000))
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wal.New(nil, nil, path.Join(dir, "wal"), false)
require.NoError(t, err)
@ -2663,7 +2663,6 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
for i, test := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
tempDir, err := ioutil.TempDir("", "test_chunk_writer")
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()
@ -2899,7 +2898,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
expectedLoadedDirs[outDir] = struct{}{}
// Touch chunks dir in block.
require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0777))
require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777))
defer func() {
require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks")))
}()
@ -3166,7 +3165,7 @@ func TestLockfileMetric(t *testing.T) {
// Test preconditions (file already exists + lockfile option)
lockfilePath := filepath.Join(absdir, "lock")
if c.fileAlreadyExists {
err = ioutil.WriteFile(lockfilePath, []byte{}, 0644)
err = ioutil.WriteFile(lockfilePath, []byte{}, 0o644)
require.NoError(t, err)
}
opts := DefaultOptions()


@ -133,7 +133,6 @@ func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
dec := Decbuf{B: b[:len(b)-4]}
if castagnoliTable != nil {
if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp {
return Decbuf{E: ErrInvalidChecksum}
}


@ -284,7 +284,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
// This math is essentially looking at nextIndex, where we would write the next exemplar to,
// and find the index in the old exemplar buffer that we should start migrating exemplars from.
// This way we don't migrate exemplars that would just be overwritten when migrating later exemplars.
var startIndex = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
for i := int64(0); i < count; i++ {
idx := (startIndex + i) % int64(len(oldBuffer))


@ -448,7 +448,6 @@ func TestResize(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@ -456,7 +455,8 @@ func TestResize(t *testing.T) {
for i := 0; int64(i) < tc.startSize; i++ {
err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{
Value: float64(i),
Ts: int64(i)})
Ts: int64(i),
})
require.NoError(t, err)
}


@ -27,7 +27,7 @@ import (
// CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
// Source and destination must be full paths.
func CopyDirs(src, dest string) error {
if err := os.MkdirAll(dest, 0777); err != nil {
if err := os.MkdirAll(dest, 0o777); err != nil {
return err
}
files, err := readDirs(src)
@ -46,7 +46,7 @@ func CopyDirs(src, dest string) error {
// Empty directories are also created.
if stat.IsDir() {
if err := os.MkdirAll(dp, 0777); err != nil {
if err := os.MkdirAll(dp, 0o777); err != nil {
return err
}
continue
@ -65,7 +65,7 @@ func copyFile(src, dest string) error {
return err
}
err = ioutil.WriteFile(dest, data, 0666)
err = ioutil.WriteFile(dest, data, 0o666)
if err != nil {
return err
}


@ -29,7 +29,7 @@ type Releaser interface {
// locking has failed. Neither this function nor the returned Releaser is
// goroutine-safe.
func Flock(fileName string) (r Releaser, existed bool, err error) {
if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil {
return nil, false, err
}


@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error {
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666)
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666)
if err != nil {
return nil, err
}


@ -46,7 +46,7 @@ func (l *unixLock) set(lock bool) error {
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}


@ -41,7 +41,7 @@ func (l *unixLock) set(lock bool) error {
}
func newLock(fileName string) (Releaser, error) {
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}

Some files were not shown because too many files have changed in this diff.