Format Go source files using 'gofumpt -w -s -extra'

Part of #9557

Signed-off-by: Mateusz Gozdek <mgozdekof@gmail.com>
Authored by Mateusz Gozdek on 2021-10-22 10:06:44 +02:00; committed by Julien Pivotto
parent b1e8e8a0ca
commit 1a6c2283a3
130 changed files with 1697 additions and 1350 deletions
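
For readers skimming the diff: gofumpt is a stricter superset of gofmt. The -w flag writes the reformatted output back to each file, -extra enables gofumpt's additional rules, and -s requests gofmt-style simplification. The recurring rewrites visible below (grouping adjacent var/const declarations, 0o-prefixed octal literals, := for simple declarations, merged repeated parameter types, a space after //, and a blank line between top-level functions) are condensed into the following minimal sketch; the identifiers are illustrative only, not taken from the Prometheus tree.

// A minimal sketch of the rewrite patterns applied in this commit; the
// identifiers below are illustrative, not from the Prometheus tree.
package main

import "fmt"

// Adjacent single-line const declarations are grouped into one block.
const (
	testUsername = "my-user"
	testPassword = "my-password"
)

// was: func join(a string, b string) string; repeated parameter types merge.
func join(a, b string) string {
	s := a + b // was: var s = a + b; plain var declarations become :=
	return s
}

// A blank line now separates top-level functions, and //comments gain a space.
func perm() int {
	return 0o666 // was: 0666; octal literals gain the 0o prefix.
}

func main() {
	fmt.Println(join("go", "fumpt"), perm())
}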


@@ -664,7 +664,7 @@ func main() {
 	)
 
 	// This is passed to ruleManager.Update().
-	var externalURL = cfg.web.ExternalURL.String()
+	externalURL := cfg.web.ExternalURL.String()
 
 	reloaders := []reloader{
 		{
@@ -896,7 +896,6 @@ func main() {
 				return nil
 			}
 		}
-
 		},
 		func(err error) {
 			// Wait for any in-progress reloads to complete to avoid
@@ -1146,6 +1145,7 @@ type safePromQLNoStepSubqueryInterval struct {
 func durationToInt64Millis(d time.Duration) int64 {
 	return int64(d / time.Millisecond)
 }
+
 func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
 	i.value.Store(durationToInt64Millis(time.Duration(ev)))
 }
@@ -1159,7 +1159,7 @@ type reloader struct {
 	reloader func(*config.Config) error
 }
 
-func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
 	start := time.Now()
 	timings := []interface{}{}
 	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)


@@ -35,10 +35,12 @@ import (
 	"github.com/prometheus/prometheus/rules"
 )
 
-var promPath = os.Args[0]
-var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
-var agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
-var promData = filepath.Join(os.TempDir(), "data")
+var (
+	promPath    = os.Args[0]
+	promConfig  = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
+	agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
+	promData    = filepath.Join(os.TempDir(), "data")
+)
 
 func TestMain(m *testing.M) {
 	for i, arg := range os.Args {


@@ -21,7 +21,7 @@ import (
 	"github.com/pkg/errors"
 )
 
-const filePerm = 0666
+const filePerm = 0o666
 
 type tarGzFileWriter struct {
 	tarWriter *tar.Writer


@@ -105,7 +105,6 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 					// The next sample is not in this timerange, we can avoid parsing
 					// the file for this timerange.
 					continue
 				}
-
 				nextSampleTs = math.MaxInt64
@@ -207,13 +206,11 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 				return nil
 			}()
-
 			if err != nil {
 				return errors.Wrap(err, "process blocks")
 			}
 		}
-
 	}
 
 	return nil
 }
 
 func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {


@@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) error {
 				return errors.Wrap(err, "error writing into the archive")
 			}
 		}
-
 	}
 
 	if err := archiver.close(); err != nil {


@@ -257,7 +257,7 @@ func main() {
 	case tsdbDumpCmd.FullCommand():
 		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
-	//TODO(aSquare14): Work on adding support for custom block size.
+	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
@@ -560,7 +560,6 @@ func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
 	var rules compareRuleTypes
 	for _, group := range groups {
 		for _, rule := range group.Rules {
-
 			rules = append(rules, compareRuleType{
 				metric: ruleMetric(rule),
@@ -774,7 +773,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
 }
 
 // QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, name string, start, end string, p printer) int {
+func QueryLabels(url *url.URL, name, start, end string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
@@ -952,11 +951,13 @@ type promqlPrinter struct{}
 func (p *promqlPrinter) printValue(v model.Value) {
 	fmt.Println(v)
 }
+
 func (p *promqlPrinter) printSeries(val []model.LabelSet) {
 	for _, v := range val {
 		fmt.Println(v)
 	}
 }
+
 func (p *promqlPrinter) printLabelValues(val model.LabelValues) {
 	for _, v := range val {
 		fmt.Println(v)
@@ -969,10 +970,12 @@ func (j *jsonPrinter) printValue(v model.Value) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printSeries(v []model.LabelSet) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
@@ -980,7 +983,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 
 // importRules backfills recording rules from the files provided. The output are blocks of data
 // at the outputDir location.
-func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error {
+func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
 	ctx := context.Background()
 	var stime, etime time.Time
 	var err error


@@ -54,7 +54,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 		twentyFourHourDuration, _ = time.ParseDuration("24h")
 	)
 
-	var testCases = []struct {
+	testCases := []struct {
 		name             string
 		runcount         int
 		maxBlockDuration time.Duration
@@ -192,7 +192,7 @@ func createSingleRuleTestFiles(path string) error {
     labels:
         testlabel11: testlabelvalue11
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 func createMultiRuleTestFiles(path string) error {
@@ -212,7 +212,7 @@ func createMultiRuleTestFiles(path string) error {
     labels:
         testlabel11: testlabelvalue13
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
@@ -244,7 +244,7 @@ func TestBackfillLabels(t *testing.T) {
     labels:
         name1: value-from-rule
 `
-	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777))
+	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777))
 	errs := ruleImporter.loadGroups(ctx, []string{path})
 	for _, err := range errs {
 		require.NoError(t, err)


@@ -46,21 +46,24 @@ func TestSDCheckResult(t *testing.T) {
 	}
 
 	expectedSDCheckResult := []sdCheckResult{
-		sdCheckResult{
+		{
 			DiscoveredLabels: labels.Labels{
 				labels.Label{Name: "__address__", Value: "localhost:8080"},
 				labels.Label{Name: "__scrape_interval__", Value: "0s"},
 				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
-				labels.Label{Name: "foo", Value: "bar"}},
+				labels.Label{Name: "foo", Value: "bar"},
+			},
 			Labels: labels.Labels{
 				labels.Label{Name: "__address__", Value: "localhost:8080"},
 				labels.Label{Name: "__scrape_interval__", Value: "0s"},
 				labels.Label{Name: "__scrape_timeout__", Value: "0s"},
 				labels.Label{Name: "foo", Value: "bar"},
 				labels.Label{Name: "instance", Value: "localhost:8080"},
-				labels.Label{Name: "newfoo", Value: "bar"}},
+				labels.Label{Name: "newfoo", Value: "bar"},
+			},
 			Error: nil,
-		}}
+		},
+	}
 
 	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }


@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"context"
 	"fmt"
-	"github.com/prometheus/prometheus/tsdb/index"
 	"io"
 	"io/ioutil"
 	"math"
@@ -32,6 +31,8 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/prometheus/prometheus/tsdb/index"
+
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
@@ -78,7 +79,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 	if err := os.RemoveAll(b.outPath); err != nil {
 		return err
 	}
-	if err := os.MkdirAll(b.outPath, 0777); err != nil {
+	if err := os.MkdirAll(b.outPath, 0o777); err != nil {
 		return err
 	}
@@ -589,7 +590,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 	histogram := make([]int, nBuckets)
 	totalChunks := 0
 	for postingsr.Next() {
-		var lbsl = labels.Labels{}
+		lbsl := labels.Labels{}
 		var chks []chunks.Meta
 		if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
 			return err
@@ -671,14 +672,14 @@ func checkErr(err error) int {
 	return 0
 }
 
-func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
+func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
 	inputFile, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return checkErr(err)
 	}
 	defer inputFile.Close()
 
-	if err := os.MkdirAll(outputDir, 0777); err != nil {
+	if err := os.MkdirAll(outputDir, 0o777); err != nil {
 		return checkErr(errors.Wrap(err, "create output dir"))
 	}


@@ -387,7 +387,6 @@ Outer:
 // seriesLoadingString returns the input series in PromQL notation.
 func (tg *testGroup) seriesLoadingString() string {
 	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
-
 	for _, is := range tg.InputSeries {
 		result += fmt.Sprintf(" %v %v\n", is.Series, is.Values)


@@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode bool, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 	content, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return nil, err


@@ -784,17 +784,19 @@ var expectedConf = &Config{
 			Scheme:           DefaultScrapeConfig.Scheme,
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-			ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
-				Role:            "instance",
-				Region:          "RegionOne",
-				Port:            80,
-				Availability:    "public",
-				RefreshInterval: model.Duration(60 * time.Second),
-				TLSConfig: config.TLSConfig{
-					CAFile:   "testdata/valid_ca_file",
-					CertFile: "testdata/valid_cert_file",
-					KeyFile:  "testdata/valid_key_file",
-				}},
+			ServiceDiscoveryConfigs: discovery.Configs{
+				&openstack.SDConfig{
+					Role:            "instance",
+					Region:          "RegionOne",
+					Port:            80,
+					Availability:    "public",
+					RefreshInterval: model.Duration(60 * time.Second),
+					TLSConfig: config.TLSConfig{
+						CAFile:   "testdata/valid_ca_file",
+						CertFile: "testdata/valid_cert_file",
+						KeyFile:  "testdata/valid_key_file",
+					},
+				},
 			},
 		},
 		{
@@ -808,22 +810,23 @@ var expectedConf = &Config{
 			Scheme:           DefaultScrapeConfig.Scheme,
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-			ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
-				URL:               "https://puppetserver/",
-				Query:             "resources { type = \"Package\" and title = \"httpd\" }",
-				IncludeParameters: true,
-				Port:              80,
-				RefreshInterval:   model.Duration(60 * time.Second),
-				HTTPClientConfig: config.HTTPClientConfig{
-					FollowRedirects: true,
-					TLSConfig: config.TLSConfig{
-						CAFile:   "testdata/valid_ca_file",
-						CertFile: "testdata/valid_cert_file",
-						KeyFile:  "testdata/valid_key_file",
+			ServiceDiscoveryConfigs: discovery.Configs{
+				&puppetdb.SDConfig{
+					URL:               "https://puppetserver/",
+					Query:             "resources { type = \"Package\" and title = \"httpd\" }",
+					IncludeParameters: true,
+					Port:              80,
+					RefreshInterval:   model.Duration(60 * time.Second),
+					HTTPClientConfig: config.HTTPClientConfig{
+						FollowRedirects: true,
+						TLSConfig: config.TLSConfig{
+							CAFile:   "testdata/valid_ca_file",
+							CertFile: "testdata/valid_cert_file",
+							KeyFile:  "testdata/valid_key_file",
+						},
 					},
 				},
 			},
 		},
 		{
 			JobName: "hetzner",
@@ -1086,170 +1089,224 @@ var expectedErrors = []struct {
 	{
 		filename: "jobname.bad.yml",
 		errMsg:   `job_name is empty`,
-	}, {
+	},
+	{
 		filename: "jobname_dup.bad.yml",
 		errMsg:   `found multiple scrape configs with job name "prometheus"`,
-	}, {
+	},
+	{
 		filename: "scrape_interval.bad.yml",
 		errMsg:   `scrape timeout greater than scrape interval`,
-	}, {
+	},
+	{
 		filename: "labelname.bad.yml",
 		errMsg:   `"not$allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelname2.bad.yml",
 		errMsg:   `"not:allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelvalue.bad.yml",
 		errMsg:   `"\xff" is not a valid label value`,
-	}, {
+	},
+	{
 		filename: "regex.bad.yml",
 		errMsg:   "error parsing regexp",
-	}, {
+	},
+	{
 		filename: "modulus_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod requires non-zero modulus",
-	}, {
+	},
+	{
 		filename: "labelkeep.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep2.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep3.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep4.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep5.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop2.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop3.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop4.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop5.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelmap.bad.yml",
 		errMsg:   "\"l-$1\" is invalid 'replacement' for labelmap action",
-	}, {
+	},
+	{
 		filename: "rules.bad.yml",
 		errMsg:   "invalid rule file path",
-	}, {
+	},
+	{
 		filename: "unknown_attr.bad.yml",
 		errMsg:   "field consult_sd_configs not found in type",
-	}, {
+	},
+	{
 		filename: "bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_http_config_without_api_server.bad.yml",
 		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
-	}, {
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
 		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
-	}, {
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
 		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
 	},
 	{
 		filename: "kubernetes_bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_role.bad.yml",
 		errMsg:   "role",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_endpoints.bad.yml",
 		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_ingress.bad.yml",
 		errMsg:   "ingress role supports only ingress selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_node.bad.yml",
 		errMsg:   "node role supports only node selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_pod.bad.yml",
 		errMsg:   "pod role supports only pod selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_service.bad.yml",
 		errMsg:   "service role supports only service selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_duplicated_role.bad.yml",
 		errMsg:   "duplicated selector role: pod",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
 		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
-	}, {
+	},
+	{
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_authorization_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_no_servers.bad.yml",
 		errMsg:   "marathon_sd: must contain at least one Marathon server",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authtokenfile.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_basicauth.bad.yml",
 		errMsg:   "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_bearertoken.bad.yml",
 		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authorization.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "openstack_role.bad.yml",
 		errMsg:   "unknown OpenStack SD role",
-	}, {
+	},
+	{
 		filename: "openstack_availability.bad.yml",
 		errMsg:   "unknown availability invalid, must be one of admin, internal or public",
-	}, {
+	},
+	{
 		filename: "url_in_targetgroup.bad.yml",
 		errMsg:   "\"http://bad\" is not a valid hostname",
-	}, {
+	},
+	{
 		filename: "target_label_missing.bad.yml",
 		errMsg:   "relabel configuration for replace action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "target_label_hashmod_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "unknown_global_attr.bad.yml",
 		errMsg:   "field nonexistent_field not found in type config.plain",
-	}, {
+	},
+	{
 		filename: "remote_read_url_missing.bad.yml",
 		errMsg:   `url for remote_read is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_read_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_write_authorization_header.bad.yml",
 		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
-	}, {
+	},
+	{
 		filename: "remote_write_url_missing.bad.yml",
 		errMsg:   `url for remote_write is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_dup.bad.yml",
 		errMsg:   `found multiple remote write configs with job name "queue1"`,
-	}, {
+	},
+	{
 		filename: "remote_read_dup.bad.yml",
 		errMsg:   `found multiple remote read configs with job name "queue1"`,
 	},


@@ -63,13 +63,11 @@ const (
 	ec2LabelSeparator = ","
 )
 
-var (
-	// DefaultEC2SDConfig is the default EC2 SD configuration.
-	DefaultEC2SDConfig = EC2SDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultEC2SDConfig is the default EC2 SD configuration.
+var DefaultEC2SDConfig = EC2SDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&EC2SDConfig{})


@@ -53,13 +53,11 @@ const (
 	lightsailLabelSeparator = ","
 )
 
-var (
-	// DefaultLightsailSDConfig is the default Lightsail SD configuration.
-	DefaultLightsailSDConfig = LightsailSDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultLightsailSDConfig is the default Lightsail SD configuration.
+var DefaultLightsailSDConfig = LightsailSDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&LightsailSDConfig{})


@@ -339,7 +339,6 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 			// Get the IP address information via separate call to the network provider.
 			for _, nicID := range vm.NetworkInterfaces {
 				networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
-
 				if err != nil {
 					level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err)
 					ch <- target{labelSet: nil, err: err}
@@ -437,9 +436,8 @@ func (client *azureClient) getScaleSets(ctx context.Context) ([]compute.VirtualM
 func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
 	var vms []virtualMachine
-	//TODO do we really need to fetch the resourcegroup this way?
+	// TODO do we really need to fetch the resourcegroup this way?
 	r, err := newAzureResourceFromID(*scaleSet.ID, nil)
-
 	if err != nil {
 		return nil, errors.Wrap(err, "could not parse scale set ID")
 	}


@@ -54,7 +54,7 @@ const (
 	healthLabel = model.MetaLabelPrefix + "consul_health"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
-	//servicePortLabel is the name of the label containing the service port.
+	// servicePortLabel is the name of the label containing the service port.
 	servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
 	// datacenterLabel is the name of the label containing the datacenter ID.
 	datacenterLabel = model.MetaLabelPrefix + "consul_dc"
@@ -530,7 +530,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	for _, serviceNode := range serviceNodes {
 		// We surround the separated list with the separator as well. This way regular expressions
 		// in relabeling rules don't have to consider tag positions.
-		var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
+		tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
 
 		// If the service address is not empty it should be used instead of the node address
 		// since the service may be registered remotely through a different node.


@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
 
 func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
-		Services: []string{"configuredServiceName"}}
+		Services: []string{"configuredServiceName"},
+	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *testing.T) {
 		ServiceTags: []string{"http"},
 	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 	for _, tc := range cases {
 		consulDiscovery, err := NewDiscovery(tc.conf, nil)
-
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}
@@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}


@@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() {
 			panic(err)
 		}
 	}
-	fmt.Fprint(w, []string{`
+	fmt.Fprint(w, []string{
+		`
 {
   "droplets": [
     {


@@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string) string {
 }
 
 // copyFileTo atomically copies a file with a different name to the runner's directory.
-func (t *testRunner) copyFileTo(src string, name string) string {
+func (t *testRunner) copyFileTo(src, name string) string {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src string, name string) string {
 }
 
 // writeString writes atomically a string to a file.
-func (t *testRunner) writeString(file string, data string) {
+func (t *testRunner) writeString(file, data string) {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) {
 			},
 			{
 				Source: fileSource(sdFile, 1),
-			}},
+			},
+		},
 	)
 }


@@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
 	)
 	return d, nil
 }
+
 func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	servers, err := d.client.Server.All(ctx)
 	if err != nil {


@@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks() {
 	})
 }
 
-const robotTestUsername = "my-hetzner"
-const robotTestPassword = "my-password"
+const (
+	robotTestUsername = "my-hetzner"
+	robotTestPassword = "my-password"
+)
 
 // HandleRobotServers mocks the robot servers list endpoint.
 func (m *SDMock) HandleRobotServers() {


@@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
 	return d, nil
 }
+
 func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {


@@ -60,7 +60,6 @@ func TestHTTPValidRefresh(t *testing.T) {
 		},
 	}
 	require.Equal(t, tgs, expectedTargets)
-
 }
 
 func TestHTTPInvalidCode(t *testing.T) {
@@ -398,5 +397,4 @@ func TestSourceDisappeared(t *testing.T) {
 			require.Equal(t, test.expectedTargets[i], tgs)
 		}
 	}
-
 }


@@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
 		clientGoRequestLatencyMetricVec,
 	)
 }
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
+
+func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
+
 func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
@@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist
 func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
 	return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
 	return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
+
 func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}


@@ -27,7 +27,7 @@ import (
 )
 
 func makeEndpoints() *v1.Endpoints {
-	var nodeName = "foobar"
+	nodeName := "foobar"
 	return &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",


@@ -171,13 +171,15 @@ type hasSynced interface {
 	hasSynced() bool
 }
 
-var _ hasSynced = &Discovery{}
-var _ hasSynced = &Node{}
-var _ hasSynced = &Endpoints{}
-var _ hasSynced = &EndpointSlice{}
-var _ hasSynced = &Ingress{}
-var _ hasSynced = &Pod{}
-var _ hasSynced = &Service{}
+var (
+	_ hasSynced = &Discovery{}
+	_ hasSynced = &Node{}
+	_ hasSynced = &Endpoints{}
+	_ hasSynced = &EndpointSlice{}
+	_ hasSynced = &Ingress{}
+	_ hasSynced = &Pod{}
+	_ hasSynced = &Service{}
+)
 
 func (d *Discovery) hasSynced() bool {
 	d.RLock()


@@ -25,7 +25,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {
+func makeNode(name, address string, labels, annotations map[string]string) *v1.Node {
 	return &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,


@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
-
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 				{
 					Source:  "tp1_group2",
 					Targets: []model.LabelSet{{"__instance__": "2"}},
-				}},
+				},
+			},
 			},
 		},
 	},
@@ -729,14 +729,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
-
 			if l.String() == label {
 				match = true
 			}
 		}
 	}
 	if match != present {
 		msg := ""
@@ -926,7 +924,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {


@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
-
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T) {
 				{
 					Source:  "tp1_group2",
 					Targets: []model.LabelSet{{"__instance__": "2"}},
-				}},
+				},
+			},
 			},
 		},
 	},
@@ -719,7 +719,7 @@ func staticConfig(addrs ...string) StaticConfig {
 	return cfg
 }
 
-func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) {
+func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) {
 	t.Helper()
 	if _, ok := tGroups[key]; !ok {
 		t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
@@ -734,7 +734,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
 			match = true
 		}
 	}
-
 	}
 	if match != present {
 		msg := ""
@@ -755,14 +754,12 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
-
 			if l.String() == label {
 				match = true
 			}
 		}
 	}
 	if match != present {
 		msg := ""
@@ -1062,7 +1059,6 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
 		t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
 	}
-
 }
 
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1179,7 +1175,6 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {


@@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.LabelSet {
 // Generate a target endpoint string in host:port format.
 func targetEndpoint(task *task, port uint32, containerNet bool) string {
-
 	var host string
 
 	// Use the task's ipAddress field when it's in a container network
@@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
 // Get a list of ports and a list of labels from a PortMapping.
 func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) {
-
 	ports := make([]uint32, len(portMappings))
 	labels := make([]map[string]string, len(portMappings))


@ -60,9 +60,7 @@ func TestMarathonSDHandleError(t *testing.T) {
} }
func TestMarathonSDEmptyList(t *testing.T) { func TestMarathonSDEmptyList(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -99,11 +97,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *appList {
} }
func TestMarathonSDSendGroup(t *testing.T) { func TestMarathonSDSendGroup(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppList(marathonValidLabel, 1), nil
return marathonTestAppList(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -195,11 +191,9 @@ func marathonTestAppListWithMultiplePorts(labels map[string]string, runningTasks
} }
func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -254,11 +248,9 @@ func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int)
} }
func TestMarathonZeroTaskPorts(t *testing.T) { func TestMarathonZeroTaskPorts(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -331,11 +323,9 @@ func marathonTestAppListWithPortDefinitions(labels map[string]string, runningTas
} }
func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -403,11 +393,9 @@ func marathonTestAppListWithPortDefinitionsRequirePorts(labels map[string]string
} }
func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -470,11 +458,9 @@ func marathonTestAppListWithPorts(labels map[string]string, runningTasks int) *a
} }
func TestMarathonSDSendGroupWithPorts(t *testing.T) { func TestMarathonSDSendGroupWithPorts(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
return marathonTestAppListWithPorts(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -546,11 +532,9 @@ func marathonTestAppListWithContainerPortMappings(labels map[string]string, runn
} }
func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -622,11 +606,9 @@ func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string
} }
func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
@ -702,11 +684,9 @@ func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]st
} }
func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
var ( client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil }
}
)
tgs, err := testUpdateServices(client) tgs, err := testUpdateServices(client)
if err != nil { if err != nil {
t.Fatalf("Got error: %s", err) t.Fatalf("Got error: %s", err)
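
Every hunk in this file applies the same gofumpt rewrite: a var block that declares exactly one variable with an inferred type inside a function body is collapsed into a short := declaration. A minimal sketch of the rule, using hypothetical names that are not from this commit:

    package main

    import "fmt"

    func main() {
        // Before gofumpt:
        // var (
        //     greeting = "hello"
        // )
        greeting := "hello" // the single-spec var group collapses to :=
        fmt.Println(greeting)
    }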

@ -51,8 +51,10 @@ type HypervisorDiscovery struct {
// newHypervisorDiscovery returns a new hypervisor discovery. // newHypervisorDiscovery returns a new hypervisor discovery.
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery { port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery {
return &HypervisorDiscovery{provider: provider, authOpts: opts, return &HypervisorDiscovery{
region: region, port: port, availability: availability, logger: l} provider: provider, authOpts: opts,
region: region, port: port, availability: availability, logger: l,
}
} }
func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
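
The hunk above shows gofumpt's composite-literal spacing: once a struct literal spans several lines, the opening brace no longer shares a line with the first fields, and the final field line gains a trailing comma so the closing brace can stand alone. A sketch under hypothetical names:

    package main

    import "fmt"

    type hypervisor struct {
        region string
        port   int
    }

    func newHypervisor() *hypervisor {
        // Before: return &hypervisor{region: "r1",
        //     port: 9100}
        return &hypervisor{
            region: "r1",
            port:   9100, // trailing comma lets the brace close on its own line
        }
    }

    func main() { fmt.Println(*newHypervisor()) }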

@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite) openstackAuthSuccess() (refresher, erro
} }
func TestOpenstackSDHypervisorRefresh(t *testing.T) { func TestOpenstackSDHypervisorRefresh(t *testing.T) {
mock := &OpenstackSDHypervisorTestSuite{} mock := &OpenstackSDHypervisorTestSuite{}
mock.SetupTest(t) mock.SetupTest(t)

@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
} }
return &InstanceDiscovery{provider: provider, authOpts: opts, return &InstanceDiscovery{
region: region, port: port, allTenants: allTenants, availability: availability, logger: l} provider: provider, authOpts: opts,
region: region, port: port, allTenants: allTenants, availability: availability, logger: l,
}
} }
type floatingIPKey struct { type floatingIPKey struct {

@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) openstackAuthSuccess() (refresher, error)
} }
func TestOpenstackSDInstanceRefresh(t *testing.T) { func TestOpenstackSDInstanceRefresh(t *testing.T) {
mock := &OpenstackSDInstanceTestSuite{} mock := &OpenstackSDInstanceTestSuite{}
mock.SetupTest(t) mock.SetupTest(t)

@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Request, expected string) {
} }
} }
func testHeader(t *testing.T, r *http.Request, header string, expected string) { func testHeader(t *testing.T, r *http.Request, header, expected string) {
if actual := r.Header.Get(header); expected != actual { if actual := r.Header.Get(header); expected != actual {
t.Errorf("Header %s = %s, expected %s", header, actual, expected) t.Errorf("Header %s = %s, expected %s", header, actual, expected)
} }
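
Collapsing "header string, expected string" into "header, expected string" is the parameter-grouping rule gofumpt applies in -extra mode: adjacent parameters of the same type share one type name. A sketch with hypothetical names:

    package main

    import "fmt"

    // Before: func join(a string, b string) string
    func join(a, b string) string { // adjacent same-typed parameters grouped
        return a + b
    }

    func main() { fmt.Println(join("head", "er")) }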

@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.Logger) (*refresh.Discovery, error) {
time.Duration(conf.RefreshInterval), time.Duration(conf.RefreshInterval),
r.refresh, r.refresh,
), nil ), nil
} }
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {

@ -173,8 +173,7 @@ func init() {
// Discovery periodically performs Scaleway requests. It implements // Discovery periodically performs Scaleway requests. It implements
// the Discoverer interface. // the Discoverer interface.
type Discovery struct { type Discovery struct{}
}
func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
r, err := newRefresher(conf) r, err := newRefresher(conf)
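
"type Discovery struct{}" above is the formatter collapsing an empty declaration body onto one line. A sketch on a hypothetical type:

    package main

    import "fmt"

    // Before:
    // type marker struct {
    // }
    type marker struct{} // an empty body collapses onto the declaration line

    func main() { fmt.Printf("%T\n", marker{}) }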

@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
expectedReply: nil, expectedReply: nil,
expectedGroup: Group{Targets: []model.LabelSet{ expectedGroup: Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"}, {"__address__": "localhost:9090"},
{"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}}, {"__address__": "localhost:9091"},
}, Labels: model.LabelSet{"my": "label"}},
}, },
{ {
json: ` {"label": {},"targets": []}`, json: ` {"label": {},"targets": []}`,
@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg) require.Equal(t, test.expectedGroup, tg)
} }
} }
func TestTargetGroupYamlMarshal(t *testing.T) { func TestTargetGroupYamlMarshal(t *testing.T) {
@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
}, },
{ {
// targets only exposes addresses. // targets only exposes addresses.
group: Group{Targets: []model.LabelSet{ group: Group{
{"__address__": "localhost:9090"}, Targets: []model.LabelSet{
{"__address__": "localhost:9091"}}, {"__address__": "localhost:9090"},
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}, {"__address__": "localhost:9091"},
},
Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
},
expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n", expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n",
expectedErr: nil, expectedErr: nil,
}, },
@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
expectedReply: nil, expectedReply: nil,
expectedGroup: Group{Targets: []model.LabelSet{ expectedGroup: Group{Targets: []model.LabelSet{
{"__address__": "localhost:9090"}, {"__address__": "localhost:9090"},
{"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}}, {"__address__": "localhost:9191"},
}, Labels: model.LabelSet{"my": "label"}},
}, },
{ {
// incorrect syntax. // incorrect syntax.
@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *testing.T) {
require.Equal(t, test.expectedReply, actual) require.Equal(t, test.expectedReply, actual)
require.Equal(t, test.expectedGroup, tg) require.Equal(t, test.expectedGroup, tg)
} }
} }
func TestString(t *testing.T) { func TestString(t *testing.T) {
// String() should return only the source, regardless of other attributes. // String() should return only the source, regardless of other attributes.
group1 := group1 :=
Group{Targets: []model.LabelSet{ Group{
{"__address__": "localhost:9090"}, Targets: []model.LabelSet{
{"__address__": "localhost:9091"}}, {"__address__": "localhost:9090"},
{"__address__": "localhost:9091"},
},
Source: "<source>", Source: "<source>",
Labels: model.LabelSet{"foo": "bar", "bar": "baz"}} Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
}
group2 := group2 :=
Group{Targets: []model.LabelSet{}, Group{
Source: "<source>", Targets: []model.LabelSet{},
Labels: model.LabelSet{}} Source: "<source>",
Labels: model.LabelSet{},
}
require.Equal(t, "<source>", group1.String()) require.Equal(t, "<source>", group1.String())
require.Equal(t, "<source>", group2.String()) require.Equal(t, "<source>", group2.String())
require.Equal(t, group1.String(), group2.String()) require.Equal(t, group1.String(), group2.String())

@ -190,7 +190,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
default: default:
return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role) return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
} }
var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version) endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
if len(d.sdConfig.Groups) > 0 { if len(d.sdConfig.Groups) > 0 {
groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ",")) groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ","))
endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups) endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)

@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *testing.T) {
} }
func TestTritonSDRefreshMultipleTargets(t *testing.T) { func TestTritonSDRefreshMultipleTargets(t *testing.T) {
var ( dstr := `{"containers":[
dstr = `{"containers":[
{ {
"groups":["foo","bar","baz"], "groups":["foo","bar","baz"],
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
"vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7" "vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7"
}] }]
}` }`
)
tgts := testTritonSDRefresh(t, conf, dstr) tgts := testTritonSDRefresh(t, conf, dstr)
require.NotNil(t, tgts) require.NotNil(t, tgts)
@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(t *testing.T) {
} }
func TestTritonSDRefreshNoServer(t *testing.T) { func TestTritonSDRefreshNoServer(t *testing.T) {
var ( td, _ := newTritonDiscovery(conf)
td, _ = newTritonDiscovery(conf)
)
_, err := td.refresh(context.Background()) _, err := td.refresh(context.Background())
require.Error(t, err) require.Error(t, err)
@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
} }
func TestTritonSDRefreshCancelled(t *testing.T) { func TestTritonSDRefreshCancelled(t *testing.T) {
var ( td, _ := newTritonDiscovery(conf)
td, _ = newTritonDiscovery(conf)
)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
cancel() cancel()
@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
} }
func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) { func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
var ( dstr := `{"cns":[
dstr = `{"cns":[
{ {
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131" "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131"
}, },
@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6" "server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6"
}] }]
}` }`
)
tgts := testTritonSDRefresh(t, cnconf, dstr) tgts := testTritonSDRefresh(t, cnconf, dstr)
require.NotNil(t, tgts) require.NotNil(t, tgts)
@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
} }
func TestTritonSDRefreshCNsWithHostname(t *testing.T) { func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
var ( dstr := `{"cns":[
dstr = `{"cns":[
{ {
"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131", "server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
"server_hostname": "server01" "server_hostname": "server01"
@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
"server_hostname": "server02" "server_hostname": "server02"
}] }]
}` }`
)
tgts := testTritonSDRefresh(t, cnconf, dstr) tgts := testTritonSDRefresh(t, cnconf, dstr)
require.NotNil(t, tgts) require.NotNil(t, tgts)

@ -119,7 +119,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig *c = DefaultSDConfig
type plain SDConfig type plain SDConfig
err := unmarshal((*plain)(c)) err := unmarshal((*plain)(c))
if err != nil { if err != nil {
return err return err
} }
@ -141,7 +140,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil return nil
} }
func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) { func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) {
var result string var result string
err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result) err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
return result, err return result, err
@ -151,7 +150,7 @@ func logout(rpcclient *xmlrpc.Client, token string) error {
return rpcclient.Call("auth.logout", token, nil) return rpcclient.Call("auth.logout", token, nil)
} }
func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) { func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) {
var systemGroupsInfos []struct { var systemGroupsInfos []struct {
SystemID int `xmlrpc:"id"` SystemID int `xmlrpc:"id"`
SystemGroups []systemGroupID `xmlrpc:"system_groups"` SystemGroups []systemGroupID `xmlrpc:"system_groups"`
@ -234,7 +233,6 @@ func (d *Discovery) getEndpointLabels(
systemGroupIDs []systemGroupID, systemGroupIDs []systemGroupID,
networkInfo networkInfo, networkInfo networkInfo,
) model.LabelSet { ) model.LabelSet {
var addr, scheme string var addr, scheme string
managedGroupNames := getSystemGroupNames(systemGroupIDs) managedGroupNames := getSystemGroupNames(systemGroupIDs)
addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port) addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port)
@ -274,7 +272,6 @@ func (d *Discovery) getTargetsForSystems(
token string, token string,
entitlement string, entitlement string,
) ([]model.LabelSet, error) { ) ([]model.LabelSet, error) {
result := make([]model.LabelSet, 0) result := make([]model.LabelSet, 0)
systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement) systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement)

@ -26,22 +26,19 @@ import (
"google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/anypb"
) )
var ( var httpResourceConf = &HTTPResourceClientConfig{
httpResourceConf = &HTTPResourceClientConfig{ HTTPClientConfig: config.HTTPClientConfig{
HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
TLSConfig: config.TLSConfig{InsecureSkipVerify: true}, },
}, ResourceType: "monitoring",
ResourceType: "monitoring", // Some known type.
// Some known type. ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest",
ResourceTypeURL: "type.googleapis.com/envoy.service.discovery.v3.DiscoveryRequest", Server: "http://localhost",
Server: "http://localhost", ClientID: "test-id",
ClientID: "test-id", }
}
)
func urlMustParse(str string) *url.URL { func urlMustParse(str string) *url.URL {
parsed, err := url.Parse(str) parsed, err := url.Parse(str)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -92,7 +89,6 @@ func TestCreateNewHTTPResourceClient(t *testing.T) {
require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1") require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
require.Equal(t, client.client.Timeout, 1*time.Minute) require.Equal(t, client.client.Timeout, 1*time.Minute)
} }
func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) { func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {
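
This hunk shows the package-level counterpart of the := rewrite: a parenthesized var block containing only one spec loses its parentheses. A sketch on a hypothetical variable:

    package main

    import "fmt"

    // Before:
    // var (
    //     defaultServer = "http://localhost"
    // )
    var defaultServer = "http://localhost" // a lone spec needs no grouping parens

    func main() { fmt.Println(defaultServer) }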

@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis
serialized := make([]*anypb.Any, len(resources)) serialized := make([]*anypb.Any, len(resources))
for i, res := range resources { for i, res := range resources {
data, err := proto.Marshal(res) data, err := proto.Marshal(res)
if err != nil { if err != nil {
return nil, err return nil, err
} }

@ -50,7 +50,7 @@ var (
tagsLabel = model.MetaLabelPrefix + "consul_tags" tagsLabel = model.MetaLabelPrefix + "consul_tags"
// serviceAddressLabel is the name of the label containing the (optional) service address. // serviceAddressLabel is the name of the label containing the (optional) service address.
serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
//servicePortLabel is the name of the label containing the service port. // servicePortLabel is the name of the label containing the service port.
servicePortLabel = model.MetaLabelPrefix + "consul_service_port" servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
// serviceIDLabel is the name of the label containing the service ID. // serviceIDLabel is the name of the label containing the service ID.
serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"
@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
for _, node := range nodes { for _, node := range nodes {
// We surround the separated list with the separator as well. This way regular expressions // We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions. // in relabeling rules don't have to consider tag positions.
var tags = "," + strings.Join(node.ServiceTags, ",") + "," tags := "," + strings.Join(node.ServiceTags, ",") + ","
// If the service address is not empty it should be used instead of the node address // If the service address is not empty it should be used instead of the node address
// since the service may be registered remotely through a different node. // since the service may be registered remotely through a different node.
@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; { for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; {
var srvs map[string][]string var srvs map[string][]string
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error getting services list", "err", err) level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second) time.Sleep(time.Duration(d.refreshInterval) * time.Second)
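
Two rules appear in this hunk: comments get a space after the // marker, and an in-function "var tags = ..." becomes "tags := ...". A sketch of the comment rule, on a hypothetical value:

    package main

    import "fmt"

    // Before: "//servicePortLabel is ..." with no space after the slashes.
    // servicePortLabel is a hypothetical label name carrying the service port.
    var servicePortLabel = "service_port"

    func main() { fmt.Println(servicePortLabel) }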

@ -163,7 +163,7 @@ func (a *Adapter) Run() {
} }
// NewAdapter creates a new instance of Adapter. // NewAdapter creates a new instance of Adapter.
func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter { func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
return &Adapter{ return &Adapter{
ctx: ctx, ctx: ctx,
disc: d, disc: d,

@ -37,7 +37,7 @@ type Client struct {
} }
// NewClient creates a new Client. // NewClient creates a new Client.
func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client { func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }

@ -20,13 +20,11 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var ( var metric = model.Metric{
metric = model.Metric{ model.MetricNameLabel: "test:metric",
model.MetricNameLabel: "test:metric", "testlabel": "test:value",
"testlabel": "test:value", "many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
"many_chars": "abc!ABC:012-3!45ö67~89./(){},=.\"\\", }
}
)
func TestEscape(t *testing.T) { func TestEscape(t *testing.T) {
// Can we correctly keep and escape valid chars. // Can we correctly keep and escape valid chars.

@ -41,7 +41,7 @@ type Client struct {
} }
// NewClient creates a new Client. // NewClient creates a new Client.
func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client { func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client {
c, err := influx.NewHTTPClient(conf) c, err := influx.NewHTTPClient(conf)
// Currently influx.NewClient() *should* never return an error. // Currently influx.NewClient() *should* never return an error.
if err != nil { if err != nil {

@ -21,13 +21,11 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var ( var metric = model.Metric{
metric = model.Metric{ model.MetricNameLabel: "test:metric",
model.MetricNameLabel: "test:metric", "testlabel": "test:value",
"testlabel": "test:value", "many_chars": "abc!ABC:012-3!45ö67~89./",
"many_chars": "abc!ABC:012-3!45ö67~89./", }
}
)
func TestTagsFromMetric(t *testing.T) { func TestTagsFromMetric(t *testing.T) {
expected := map[string]TagValue{ expected := map[string]TagValue{

@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert {
// Run dispatches notifications continuously. // Run dispatches notifications continuously.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
for { for {
select { select {
case <-n.ctx.Done(): case <-n.ctx.Done():

@ -40,7 +40,7 @@ import (
) )
func TestPostPath(t *testing.T) { func TestPostPath(t *testing.T) {
var cases = []struct { cases := []struct {
in, out string in, out string
}{ }{
{ {
@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T) {
} }
func TestReload(t *testing.T) { func TestReload(t *testing.T) {
var tests = []struct { tests := []struct {
in *targetgroup.Group in *targetgroup.Group
out string out string
}{ }{
@ -500,11 +500,10 @@ alerting:
require.Equal(t, tt.out, res) require.Equal(t, tt.out, res)
} }
} }
func TestDroppedAlertmanagers(t *testing.T) { func TestDroppedAlertmanagers(t *testing.T) {
var tests = []struct { tests := []struct {
in *targetgroup.Group in *targetgroup.Group
out string out string
}{ }{

@ -704,7 +704,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
lbls: func() Labels { lbls: func() Labels {
lbls := make(Labels, 10) lbls := make(Labels, 10)
for i := 0; i < len(lbls); i++ { for i := 0; i < len(lbls); i++ {
//Label ~50B name, 50B value. // Label ~50B name, 50B value.
lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)} lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
} }
return lbls return lbls

@ -21,11 +21,9 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var ( var timestampFormat = log.TimestampFormat(
timestampFormat = log.TimestampFormat( func() time.Time { return time.Now().UTC() },
func() time.Time { return time.Now().UTC() }, "2006-01-02T15:04:05.000Z07:00",
"2006-01-02T15:04:05.000Z07:00",
)
) )
// JSONFileLogger represents a logger that writes JSON to a file. // JSONFileLogger represents a logger that writes JSON to a file.
@ -40,7 +38,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
return nil, nil return nil, nil
} }
f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "can't create json logger") return nil, errors.Wrap(err, "can't create json logger")
} }
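
Rewriting 0666 as 0o666 is gofumpt's octal-literal rule: file modes use the explicit 0o prefix that Go 1.13 introduced, so the base is visible at a glance. A sketch with a hypothetical file name:

    package main

    import (
        "log"
        "os"
    )

    func main() {
        // Before: os.OpenFile(name, flags, 0666)
        f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
    }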

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only) // Statfs returns the file system type (Unix only)
func Statfs(path string) string { func Statfs(path string) string {
// Types of file systems that may be returned by `statfs` // Types of file systems that may be returned by `statfs`
fsTypes := map[int64]string{ fsTypes := map[int64]string{
0xadf5: "ADFS_SUPER_MAGIC", 0xadf5: "ADFS_SUPER_MAGIC",

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only) // Statfs returns the file system type (Unix only)
func Statfs(path string) string { func Statfs(path string) string {
// Types of file systems that may be returned by `statfs` // Types of file systems that may be returned by `statfs`
fsTypes := map[int32]string{ fsTypes := map[int32]string{
0xadf5: "ADFS_SUPER_MAGIC", 0xadf5: "ADFS_SUPER_MAGIC",

@ -23,7 +23,6 @@ import (
// Statfs returns the file system type (Unix only) // Statfs returns the file system type (Unix only)
func Statfs(path string) string { func Statfs(path string) string {
// Types of file systems that may be returned by `statfs` // Types of file systems that may be returned by `statfs`
fsTypes := map[uint32]string{ fsTypes := map[uint32]string{
0xadf5: "ADFS_SUPER_MAGIC", 0xadf5: "ADFS_SUPER_MAGIC",

@ -457,9 +457,7 @@ func BenchmarkParse(b *testing.B) {
total := 0 total := 0
for i := 0; i < b.N; i += promtestdataSampleCount { for i := 0; i < b.N; i += promtestdataSampleCount {
var ( decSamples := make(model.Vector, 0, 50)
decSamples = make(model.Vector, 0, 50)
)
sdec := expfmt.SampleDecoder{ sdec := expfmt.SampleDecoder{
Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText), Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText),
Opts: &expfmt.DecodeOptions{ Opts: &expfmt.DecodeOptions{
@ -480,6 +478,7 @@ func BenchmarkParse(b *testing.B) {
} }
} }
} }
func BenchmarkGzip(b *testing.B) { func BenchmarkGzip(b *testing.B) {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} { for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
b.Run(fn, func(b *testing.B) { b.Run(fn, func(b *testing.B) {

@ -87,12 +87,15 @@ type (
func (e ErrQueryTimeout) Error() string { func (e ErrQueryTimeout) Error() string {
return fmt.Sprintf("query timed out in %s", string(e)) return fmt.Sprintf("query timed out in %s", string(e))
} }
func (e ErrQueryCanceled) Error() string { func (e ErrQueryCanceled) Error() string {
return fmt.Sprintf("query was canceled in %s", string(e)) return fmt.Sprintf("query was canceled in %s", string(e))
} }
func (e ErrTooManySamples) Error() string { func (e ErrTooManySamples) Error() string {
return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e)) return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e))
} }
func (e ErrStorage) Error() string { func (e ErrStorage) Error() string {
return e.Err.Error() return e.Err.Error()
} }
@ -402,8 +405,10 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim
return qry, nil return qry, nil
} }
var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled") var (
var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled") ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
)
func (ng *Engine) validateOpts(expr parser.Expr) error { func (ng *Engine) validateOpts(expr parser.Expr) error {
if ng.enableAtModifier && ng.enableNegativeOffset { if ng.enableAtModifier && ng.enableNegativeOffset {
@ -2139,7 +2144,6 @@ type groupedAggregation struct {
// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
// must be sorted. // must be sorted.
func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
result := map[uint64]*groupedAggregation{} result := map[uint64]*groupedAggregation{}
orderedResult := []*groupedAggregation{} orderedResult := []*groupedAggregation{}
var k int64 var k int64
@ -2509,7 +2513,6 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
} }
if isStepInvariant { if isStepInvariant {
// The function and all arguments are step invariant. // The function and all arguments are step invariant.
return true return true
} }
@ -2559,7 +2562,6 @@ func newStepInvariantExpr(expr parser.Expr) parser.Expr {
// Wrapping the inside of () makes it easy to unwrap the paren later. // Wrapping the inside of () makes it easy to unwrap the paren later.
// But this effectively unwraps the paren. // But this effectively unwraps the paren.
return newStepInvariantExpr(e.Expr) return newStepInvariantExpr(e.Expr)
} }
return &parser.StepInvariantExpr{Expr: expr} return &parser.StepInvariantExpr{Expr: expr}
} }

File diff suppressed because it is too large

@ -56,7 +56,7 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
// It calculates the rate (allowing for counter resets if isCounter is true), // It calculates the rate (allowing for counter resets if isCounter is true),
// extrapolates if the first/last sample is close to the boundary, and returns // extrapolates if the first/last sample is close to the boundary, and returns
// the result as either per-second (if isRate is true) or overall. // the result as either per-second (if isRate is true) or overall.
func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter, isRate bool) Vector {
ms := args[0].(*parser.MatrixSelector) ms := args[0].(*parser.MatrixSelector)
vs := ms.VectorSelector.(*parser.VectorSelector) vs := ms.VectorSelector.(*parser.VectorSelector)
var ( var (

@ -407,7 +407,7 @@ type PositionRange struct {
// mergeRanges is a helper function to merge the PositionRanges of two Nodes. // mergeRanges is a helper function to merge the PositionRanges of two Nodes.
// Note that the arguments must be in the same order as they // Note that the arguments must be in the same order as they
// occur in the input string. // occur in the input string.
func mergeRanges(first Node, last Node) PositionRange { func mergeRanges(first, last Node) PositionRange {
return PositionRange{ return PositionRange{
Start: first.PositionRange().Start, Start: first.PositionRange().Start,
End: last.PositionRange().End, End: last.PositionRange().End,
@ -426,15 +426,19 @@ func (i *Item) PositionRange() PositionRange {
func (e *AggregateExpr) PositionRange() PositionRange { func (e *AggregateExpr) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
func (e *BinaryExpr) PositionRange() PositionRange { func (e *BinaryExpr) PositionRange() PositionRange {
return mergeRanges(e.LHS, e.RHS) return mergeRanges(e.LHS, e.RHS)
} }
func (e *Call) PositionRange() PositionRange { func (e *Call) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
func (e *EvalStmt) PositionRange() PositionRange { func (e *EvalStmt) PositionRange() PositionRange {
return e.Expr.PositionRange() return e.Expr.PositionRange()
} }
func (e Expressions) PositionRange() PositionRange { func (e Expressions) PositionRange() PositionRange {
if len(e) == 0 { if len(e) == 0 {
// Position undefined. // Position undefined.
@ -445,33 +449,40 @@ func (e Expressions) PositionRange() PositionRange {
} }
return mergeRanges(e[0], e[len(e)-1]) return mergeRanges(e[0], e[len(e)-1])
} }
func (e *MatrixSelector) PositionRange() PositionRange { func (e *MatrixSelector) PositionRange() PositionRange {
return PositionRange{ return PositionRange{
Start: e.VectorSelector.PositionRange().Start, Start: e.VectorSelector.PositionRange().Start,
End: e.EndPos, End: e.EndPos,
} }
} }
func (e *SubqueryExpr) PositionRange() PositionRange { func (e *SubqueryExpr) PositionRange() PositionRange {
return PositionRange{ return PositionRange{
Start: e.Expr.PositionRange().Start, Start: e.Expr.PositionRange().Start,
End: e.EndPos, End: e.EndPos,
} }
} }
func (e *NumberLiteral) PositionRange() PositionRange { func (e *NumberLiteral) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
func (e *ParenExpr) PositionRange() PositionRange { func (e *ParenExpr) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
func (e *StringLiteral) PositionRange() PositionRange { func (e *StringLiteral) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
func (e *UnaryExpr) PositionRange() PositionRange { func (e *UnaryExpr) PositionRange() PositionRange {
return PositionRange{ return PositionRange{
Start: e.StartPos, Start: e.StartPos,
End: e.Expr.PositionRange().End, End: e.Expr.PositionRange().End,
} }
} }
func (e *VectorSelector) PositionRange() PositionRange { func (e *VectorSelector) PositionRange() PositionRange {
return e.PosRange return e.PosRange
} }
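
The blank lines inserted between these PositionRange methods reflect the formatter's vertical-spacing rules: adjacent top-level declarations are separated by exactly one empty line, while empty lines directly after "{" or before "}" are stripped. A sketch under hypothetical names:

    package main

    import "fmt"

    type errQueryTimeout string

    func (e errQueryTimeout) Error() string {
        return fmt.Sprintf("query timed out in %s", string(e)) // no blank line after "{"
    }

    // Exactly one blank line separates top-level declarations.
    func main() {
        fmt.Println(errQueryTimeout("engine"))
    }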

@ -33,82 +33,84 @@ type yySymType struct {
duration time.Duration duration time.Duration
} }
const EQL = 57346 const (
const BLANK = 57347 EQL = 57346
const COLON = 57348 BLANK = 57347
const COMMA = 57349 COLON = 57348
const COMMENT = 57350 COMMA = 57349
const DURATION = 57351 COMMENT = 57350
const EOF = 57352 DURATION = 57351
const ERROR = 57353 EOF = 57352
const IDENTIFIER = 57354 ERROR = 57353
const LEFT_BRACE = 57355 IDENTIFIER = 57354
const LEFT_BRACKET = 57356 LEFT_BRACE = 57355
const LEFT_PAREN = 57357 LEFT_BRACKET = 57356
const METRIC_IDENTIFIER = 57358 LEFT_PAREN = 57357
const NUMBER = 57359 METRIC_IDENTIFIER = 57358
const RIGHT_BRACE = 57360 NUMBER = 57359
const RIGHT_BRACKET = 57361 RIGHT_BRACE = 57360
const RIGHT_PAREN = 57362 RIGHT_BRACKET = 57361
const SEMICOLON = 57363 RIGHT_PAREN = 57362
const SPACE = 57364 SEMICOLON = 57363
const STRING = 57365 SPACE = 57364
const TIMES = 57366 STRING = 57365
const operatorsStart = 57367 TIMES = 57366
const ADD = 57368 operatorsStart = 57367
const DIV = 57369 ADD = 57368
const EQLC = 57370 DIV = 57369
const EQL_REGEX = 57371 EQLC = 57370
const GTE = 57372 EQL_REGEX = 57371
const GTR = 57373 GTE = 57372
const LAND = 57374 GTR = 57373
const LOR = 57375 LAND = 57374
const LSS = 57376 LOR = 57375
const LTE = 57377 LSS = 57376
const LUNLESS = 57378 LTE = 57377
const MOD = 57379 LUNLESS = 57378
const MUL = 57380 MOD = 57379
const NEQ = 57381 MUL = 57380
const NEQ_REGEX = 57382 NEQ = 57381
const POW = 57383 NEQ_REGEX = 57382
const SUB = 57384 POW = 57383
const AT = 57385 SUB = 57384
const ATAN2 = 57386 AT = 57385
const operatorsEnd = 57387 ATAN2 = 57386
const aggregatorsStart = 57388 operatorsEnd = 57387
const AVG = 57389 aggregatorsStart = 57388
const BOTTOMK = 57390 AVG = 57389
const COUNT = 57391 BOTTOMK = 57390
const COUNT_VALUES = 57392 COUNT = 57391
const GROUP = 57393 COUNT_VALUES = 57392
const MAX = 57394 GROUP = 57393
const MIN = 57395 MAX = 57394
const QUANTILE = 57396 MIN = 57395
const STDDEV = 57397 QUANTILE = 57396
const STDVAR = 57398 STDDEV = 57397
const SUM = 57399 STDVAR = 57398
const TOPK = 57400 SUM = 57399
const aggregatorsEnd = 57401 TOPK = 57400
const keywordsStart = 57402 aggregatorsEnd = 57401
const BOOL = 57403 keywordsStart = 57402
const BY = 57404 BOOL = 57403
const GROUP_LEFT = 57405 BY = 57404
const GROUP_RIGHT = 57406 GROUP_LEFT = 57405
const IGNORING = 57407 GROUP_RIGHT = 57406
const OFFSET = 57408 IGNORING = 57407
const ON = 57409 OFFSET = 57408
const WITHOUT = 57410 ON = 57409
const keywordsEnd = 57411 WITHOUT = 57410
const preprocessorStart = 57412 keywordsEnd = 57411
const START = 57413 preprocessorStart = 57412
const END = 57414 START = 57413
const preprocessorEnd = 57415 END = 57414
const startSymbolsStart = 57416 preprocessorEnd = 57415
const START_METRIC = 57417 startSymbolsStart = 57416
const START_SERIES_DESCRIPTION = 57418 START_METRIC = 57417
const START_EXPRESSION = 57419 START_SERIES_DESCRIPTION = 57418
const START_METRIC_SELECTOR = 57420 START_EXPRESSION = 57419
const startSymbolsEnd = 57421 START_METRIC_SELECTOR = 57420
startSymbolsEnd = 57421
)
var yyToknames = [...]string{ var yyToknames = [...]string{
"$end", "$end",
@ -194,9 +196,11 @@ var yyToknames = [...]string{
var yyStatenames = [...]string{} var yyStatenames = [...]string{}
const yyEofCode = 1 const (
const yyErrCode = 2 yyEofCode = 1
const yyInitialStackSize = 16 yyErrCode = 2
yyInitialStackSize = 16
)
//line promql/parser/generated_parser.y:749 //line promql/parser/generated_parser.y:749
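
Here the formatter folds the long run of generated single-line const declarations into one grouped block, as the hunk above shows. A sketch with hypothetical token IDs:

    package main

    import "fmt"

    // Before: a run of separate "const NAME = value" lines.
    const (
        tokEQL   = 57346
        tokBLANK = 57347
        tokCOLON = 57348
    )

    func main() { fmt.Println(tokEQL, tokBLANK, tokCOLON) }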

@ -318,25 +318,32 @@ var tests = []struct {
{ {
input: "offset", input: "offset",
expected: []Item{{OFFSET, 0, "offset"}}, expected: []Item{{OFFSET, 0, "offset"}},
}, { },
{
input: "by", input: "by",
expected: []Item{{BY, 0, "by"}}, expected: []Item{{BY, 0, "by"}},
}, { },
{
input: "without", input: "without",
expected: []Item{{WITHOUT, 0, "without"}}, expected: []Item{{WITHOUT, 0, "without"}},
}, { },
{
input: "on", input: "on",
expected: []Item{{ON, 0, "on"}}, expected: []Item{{ON, 0, "on"}},
}, { },
{
input: "ignoring", input: "ignoring",
expected: []Item{{IGNORING, 0, "ignoring"}}, expected: []Item{{IGNORING, 0, "ignoring"}},
}, { },
{
input: "group_left", input: "group_left",
expected: []Item{{GROUP_LEFT, 0, "group_left"}}, expected: []Item{{GROUP_LEFT, 0, "group_left"}},
}, { },
{
input: "group_right", input: "group_right",
expected: []Item{{GROUP_RIGHT, 0, "group_right"}}, expected: []Item{{GROUP_RIGHT, 0, "group_right"}},
}, { },
{
input: "bool", input: "bool",
expected: []Item{{BOOL, 0, "bool"}}, expected: []Item{{BOOL, 0, "bool"}},
}, },
@ -569,7 +576,8 @@ var tests = []struct {
{DURATION, 24, `4s`}, {DURATION, 24, `4s`},
{RIGHT_BRACKET, 26, `]`}, {RIGHT_BRACKET, 26, `]`},
}, },
}, { },
{
input: `test:name{on!~"b:ar"}[4m:4s]`, input: `test:name{on!~"b:ar"}[4m:4s]`,
expected: []Item{ expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`}, {METRIC_IDENTIFIER, 0, `test:name`},
@ -584,7 +592,8 @@ var tests = []struct {
{DURATION, 25, `4s`}, {DURATION, 25, `4s`},
{RIGHT_BRACKET, 27, `]`}, {RIGHT_BRACKET, 27, `]`},
}, },
}, { },
{
input: `test:name{on!~"b:ar"}[4m:]`, input: `test:name{on!~"b:ar"}[4m:]`,
expected: []Item{ expected: []Item{
{METRIC_IDENTIFIER, 0, `test:name`}, {METRIC_IDENTIFIER, 0, `test:name`},
@ -598,7 +607,8 @@ var tests = []struct {
{COLON, 24, `:`}, {COLON, 24, `:`},
{RIGHT_BRACKET, 25, `]`}, {RIGHT_BRACKET, 25, `]`},
}, },
}, { // Nested Subquery. },
{ // Nested Subquery.
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`, input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:])[4m:3s]`,
expected: []Item{ expected: []Item{
@ -646,7 +656,8 @@ var tests = []struct {
{OFFSET, 29, "offset"}, {OFFSET, 29, "offset"},
{DURATION, 36, "10m"}, {DURATION, 36, "10m"},
}, },
}, { },
{
input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`, input: `min_over_time(rate(foo{bar="baz"}[2s])[5m:] offset 6m)[4m:3s]`,
expected: []Item{ expected: []Item{
@ -737,7 +748,6 @@ func TestLexer(t *testing.T) {
if item.Typ == ERROR { if item.Typ == ERROR {
hasError = true hasError = true
} }
} }
if !hasError { if !hasError {
t.Logf("%d: input %q", i, test.input) t.Logf("%d: input %q", i, test.input)
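
The repeated "}, {" → "}," plus "{" change in this test table keeps multiline composite literals consistent: when elements span several lines, each element's opening brace starts its own line. A sketch of a table in that style, with hypothetical cases:

    package main

    import "fmt"

    func main() {
        tests := []struct {
            input    string
            expected string
        }{
            {
                input:    "offset",
                expected: "OFFSET",
            },
            { // before, this brace shared a line with the previous "},"
                input:    "by",
                expected: "BY",
            },
        }
        fmt.Println(tests)
    }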

@ -241,7 +241,7 @@ func (p *parser) addParseErr(positionRange PositionRange, err error) {
// unexpected creates a parser error complaining about an unexpected lexer item. // unexpected creates a parser error complaining about an unexpected lexer item.
// The item that is presented as unexpected is always the last item produced // The item that is presented as unexpected is always the last item produced
// by the lexer. // by the lexer.
func (p *parser) unexpected(context string, expected string) { func (p *parser) unexpected(context, expected string) {
var errMsg strings.Builder var errMsg strings.Builder
// Do not report lexer errors twice // Do not report lexer errors twice
@ -354,7 +354,8 @@ func (p *parser) InjectItem(typ ItemType) {
p.inject = typ p.inject = typ
p.injecting = true p.injecting = true
} }
func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers Node, rhs Node) *BinaryExpr {
func (p *parser) newBinaryExpression(lhs Node, op Item, modifiers, rhs Node) *BinaryExpr {
ret := modifiers.(*BinaryExpr) ret := modifiers.(*BinaryExpr)
ret.LHS = lhs.(Expr) ret.LHS = lhs.(Expr)
@ -374,7 +375,7 @@ func (p *parser) assembleVectorSelector(vs *VectorSelector) {
} }
} }
func (p *parser) newAggregateExpr(op Item, modifier Node, args Node) (ret *AggregateExpr) { func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateExpr) {
ret = modifier.(*AggregateExpr) ret = modifier.(*AggregateExpr)
arguments := args.(Expressions) arguments := args.(Expressions)
@ -650,10 +651,9 @@ func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
p.yyParser.Parse(p) p.yyParser.Parse(p)
return p.generatedParserResult return p.generatedParserResult
} }
func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.Matcher { func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher {
op := operator.Typ op := operator.Typ
val := p.unquoteString(value.Val) val := p.unquoteString(value.Val)

File diff suppressed because it is too large

@ -81,8 +81,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
} }
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err)
return nil, err return nil, err
@ -104,7 +103,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, er
} }
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
err := os.MkdirAll(localStoragePath, 0777) err := os.MkdirAll(localStoragePath, 0o777)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to create directory for logging active queries") level.Error(logger).Log("msg", "Failed to create directory for logging active queries")
} }
@ -147,7 +146,6 @@ func trimStringByBytes(str string, size int) string {
func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte {
entry := Entry{query, timestamp} entry := Entry{query, timestamp}
jsonEntry, err := json.Marshal(entry) jsonEntry, err := json.Marshal(entry)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Cannot create json of query", "query", query) level.Error(logger).Log("msg", "Cannot create json of query", "query", query)
return []byte{} return []byte{}

@ -311,7 +311,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
resultFPs := map[uint64]struct{}{} resultFPs := map[uint64]struct{}{}
var vec promql.Vector var vec promql.Vector
var alerts = make(map[uint64]*Alert, len(res)) alerts := make(map[uint64]*Alert, len(res))
for _, smpl := range res { for _, smpl := range res {
// Provide the alert information to the template. // Provide the alert information to the template.
l := make(map[string]string, len(smpl.Metric)) l := make(map[string]string, len(smpl.Metric))
@ -479,7 +479,7 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
} }
} }
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) {
alerts := []*Alert{} alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) { r.ForEachActiveAlert(func(alert *Alert) {
if alert.needsSending(ts, resendDelay) { if alert.needsSending(ts, resendDelay) {

@ -834,12 +834,10 @@ func (g *Group) RestoreForState(ts time.Time) {
level.Debug(g.logger).Log("msg", "'for' state restored", level.Debug(g.logger).Log("msg", "'for' state restored",
labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
"labels", a.Labels.String()) "labels", a.Labels.String())
}) })
alertRule.SetRestored(true) alertRule.SetRestored(true)
} }
} }
// Equals return if two groups are the same. // Equals return if two groups are the same.

@ -119,17 +119,19 @@ func TestAlertingRule(t *testing.T) {
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
var tests = []struct { tests := []struct {
time time.Duration time time.Duration
result promql.Vector result promql.Vector
}{ }{
{ {
time: 0, time: 0,
result: result[:2], result: result[:2],
}, { },
{
time: 5 * time.Minute, time: 5 * time.Minute,
result: result[2:], result: result[2:],
}, { },
{
time: 10 * time.Minute, time: 10 * time.Minute,
result: result[2:3], result: result[2:3],
}, },
@ -256,7 +258,7 @@ func TestForStateAddSamples(t *testing.T) {
baseTime := time.Unix(0, 0) baseTime := time.Unix(0, 0)
var tests = []struct { tests := []struct {
time time.Duration time time.Duration
result promql.Vector result promql.Vector
persistThisTime bool // If true, it means this 'time' is persisted for 'for'. persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
@ -769,7 +771,6 @@ func TestUpdate(t *testing.T) {
} else { } else {
rgs.Groups[i].Interval = model.Duration(10) rgs.Groups[i].Interval = model.Duration(10)
} }
} }
reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs) reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)

@ -109,7 +109,6 @@ func TestDroppedTargetsList(t *testing.T) {
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated // TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`. // even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) { func TestDiscoveredLabelsUpdate(t *testing.T) {
sp := &scrapePool{} sp := &scrapePool{}
// These are used when syncing so need this to avoid a panic. // These are used when syncing so need this to avoid a panic.
sp.config = &config.ScrapeConfig{ sp.config = &config.ScrapeConfig{
@ -350,7 +349,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
client: http.DefaultClient, client: http.DefaultClient,
} }
var tgs = []*targetgroup.Group{} tgs := []*targetgroup.Group{}
for i := 0; i < 50; i++ { for i := 0; i < 50; i++ {
tgs = append(tgs, tgs = append(tgs,
&targetgroup.Group{ &targetgroup.Group{
@ -1000,6 +999,7 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
_, _, _, _ = sl.append(slApp, metrics, "", ts) _, _, _, _ = sl.append(slApp, metrics, "", ts)
} }
} }
func BenchmarkScrapeLoopAppendOM(b *testing.B) { func BenchmarkScrapeLoopAppendOM(b *testing.B) {
ctx, sl := simpleTestScrapeLoop(b) ctx, sl := simpleTestScrapeLoop(b)
@ -1409,8 +1409,10 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
"Two target labels collide with existing labels, both with and without prefix 'exported'": { "Two target labels collide with existing labels, both with and without prefix 'exported'": {
targetLabels: []string{"foo", "3", "exported_foo", "4"}, targetLabels: []string{"foo", "3", "exported_foo", "4"},
exposedLabels: `metric{foo="1" exported_foo="2"} 0`, exposedLabels: `metric{foo="1" exported_foo="2"} 0`,
expected: []string{"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo", expected: []string{
"2", "exported_foo", "4", "foo", "3"}, "__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo",
"2", "exported_foo", "4", "foo", "3",
},
}, },
"Extreme example": { "Extreme example": {
targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"}, targetLabels: []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"},
@ -1743,7 +1745,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
exemplars: []exemplar.Exemplar{ exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1}, {Labels: labels.FromStrings("a", "abc"), Value: 1},
}, },
}, { },
{
title: "Metric with exemplars and TS", title: "Metric with exemplars and TS",
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF", scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
discoveryLabels: []string{"n", "2"}, discoveryLabels: []string{"n", "2"},
@ -1754,7 +1757,8 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
exemplars: []exemplar.Exemplar{ exemplars: []exemplar.Exemplar{
{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}, {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
}, },
}, { },
{
title: "Two metrics and exemplars", title: "Two metrics and exemplars",
scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000 scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000
metric_total{n="2"} 2 # {t="2"} 2.0 20000 metric_total{n="2"} 2 # {t="2"} 2.0 20000
@ -2040,7 +2044,6 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
require.Equal(t, 1, total) require.Equal(t, 1, total)
require.Equal(t, 1, added) require.Equal(t, 1, added)
require.Equal(t, 0, seriesAdded) require.Equal(t, 0, seriesAdded)
} }
func TestTargetScraperScrapeOK(t *testing.T) { func TestTargetScraperScrapeOK(t *testing.T) {

@ -275,7 +275,7 @@ func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
func (r *sampleRing) samples() []sample { func (r *sampleRing) samples() []sample {
res := make([]sample, r.l) res := make([]sample, r.l)
var k = r.f + r.l k := r.f + r.l
var j int var j int
if k > len(r.buf) { if k > len(r.buf) {
k = len(r.buf) k = len(r.buf)

@ -42,7 +42,7 @@ type mergeGenericQuerier struct {
// See NewFanout commentary to learn more about primary vs secondary differences. // See NewFanout commentary to learn more about primary vs secondary differences.
// //
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries { for _, q := range primaries {
if _, ok := q.(noopQuerier); !ok && q != nil { if _, ok := q.(noopQuerier); !ok && q != nil {
@ -71,7 +71,7 @@ func NewMergeQuerier(primaries []Querier, secondaries []Querier, mergeFn Vertica
// //
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries []ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries { for _, q := range primaries {
if _, ok := q.(noopChunkQuerier); !ok && q != nil { if _, ok := q.(noopChunkQuerier); !ok && q != nil {
@ -104,7 +104,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche
return q.queriers[0].Select(sortSeries, hints, matchers...) return q.queriers[0].Select(sortSeries, hints, matchers...)
} }
var seriesSets = make([]genericSeriesSet, 0, len(q.queriers)) seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
if !q.concurrentSelect { if !q.concurrentSelect {
for _, querier := range q.queriers { for _, querier := range q.queriers {
// We need to sort for merge to work. // We need to sort for merge to work.
@ -265,7 +265,6 @@ func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) Seri
genericSets := make([]genericSeriesSet, 0, len(sets)) genericSets := make([]genericSeriesSet, 0, len(sets))
for _, s := range sets { for _, s := range sets {
genericSets = append(genericSets, &genericSeriesSetAdapter{s}) genericSets = append(genericSets, &genericSeriesSetAdapter{s})
} }
return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)} return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)}
} }
@ -281,7 +280,6 @@ func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeries
genericSets := make([]genericSeriesSet, 0, len(sets)) genericSets := make([]genericSeriesSet, 0, len(sets))
for _, s := range sets { for _, s := range sets {
genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s}) genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s})
} }
return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)} return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)}
} }

@ -616,7 +616,8 @@ func TestChainSampleIterator(t *testing.T) {
NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}), NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
}, },
expected: []tsdbutil.Sample{ expected: []tsdbutil.Sample{
sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}}, sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5},
},
}, },
// Overlap. // Overlap.
{ {

@ -728,7 +728,7 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) {
// processExternalLabels merges externalLabels into ls. If ls contains // processExternalLabels merges externalLabels into ls. If ls contains
// a label in externalLabels, the value in ls wins. // a label in externalLabels, the value in ls wins.
func processExternalLabels(ls labels.Labels, externalLabels labels.Labels) labels.Labels { func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels {
i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels))
for i < len(ls) && j < len(externalLabels) { for i < len(ls) && j < len(externalLabels) {
if ls[i].Name < externalLabels[j].Name { if ls[i].Name < externalLabels[j].Name {
@ -1048,7 +1048,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface
max += int(float64(max) * 0.1) max += int(float64(max) * 0.1)
} }
var pendingData = make([]prompb.TimeSeries, max) pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData { for i := range pendingData {
pendingData[i].Samples = []prompb.Sample{{}} pendingData[i].Samples = []prompb.Sample{{}}
if s.qm.sendExemplars { if s.qm.sendExemplars {
@@ -1142,7 +1142,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface
 	}
 }
 
-func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
 	begin := time.Now()
 	err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf)
 	if err != nil {
@@ -1159,7 +1159,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
 }
 
 // sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
 	// Build the WriteRequest with no metadata.
 	req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf)
 	if err != nil {
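
The comment above names the contract: retry only recoverable errors, with a growing pause between attempts. A self-contained sketch of such a loop; the 30ms start, 5s cap, and doubling are assumed values, not the settings this code reads from its configuration:

package main

import (
    "context"
    "fmt"
    "time"
)

// withBackoff retries attempt until it succeeds or ctx is cancelled,
// doubling the pause between tries up to a cap.
func withBackoff(ctx context.Context, attempt func() error) error {
    backoff := 30 * time.Millisecond // assumed starting delay
    const maxBackoff = 5 * time.Second
    for {
        if err := attempt(); err == nil {
            return nil
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(backoff):
        }
        if backoff *= 2; backoff > maxBackoff {
            backoff = maxBackoff
        }
    }
}

func main() {
    calls := 0
    err := withBackoff(context.Background(), func() error {
        calls++
        if calls < 3 {
            return fmt.Errorf("recoverable: attempt %d", calls)
        }
        return nil
    })
    fmt.Println(calls, err) // 3 <nil>
}
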


@@ -60,7 +60,6 @@ func newHighestTimestampMetric() *maxTimestamp {
 }
 
 func TestSampleDelivery(t *testing.T) {
-
 	testcases := []struct {
 		name    string
 		samples bool
@@ -107,7 +106,6 @@ func TestSampleDelivery(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
-
 			var (
 				series  []record.RefSeries
 				samples []record.RefSample
@@ -715,7 +713,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
 	const numSeries = 10000
 
 	// Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics.
-	var extraLabels = labels.Labels{
+	extraLabels := labels.Labels{
 		{Name: "kubernetes_io_arch", Value: "amd64"},
 		{Name: "kubernetes_io_instance_type", Value: "c3.somesize"},
 		{Name: "kubernetes_io_os", Value: "linux"},


@@ -506,7 +506,6 @@ func TestSampleAndChunkQueryableClient(t *testing.T) {
 			}
 			require.NoError(t, ss.Err())
 			require.Equal(t, tc.expectedSeries, got)
-
 		})
 	}
 }


@@ -81,9 +81,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar,
 }
 
 func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
-	var (
-		outOfOrderExemplarErrs = 0
-	)
+	outOfOrderExemplarErrs := 0
 
 	app := h.appendable.Appender(ctx)
 	defer func() {


@@ -87,7 +87,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer
 	// promql.Vector is hard to work with in templates, so convert to
 	// base data types.
 	// TODO(fabxc): probably not true anymore after type rework.
-	var result = make(queryResult, len(vector))
+	result := make(queryResult, len(vector))
 	for n, v := range vector {
 		s := sample{
 			Value: v.V,
@@ -301,7 +301,7 @@ func NewTemplateExpander(
 }
 
 // AlertTemplateData returns the interface to be used in expanding the template.
-func AlertTemplateData(labels map[string]string, externalLabels map[string]string, externalURL string, value float64) interface{} {
+func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, value float64) interface{} {
 	return struct {
 		Labels         map[string]string
 		ExternalLabels map[string]string
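
The anonymous struct built above is handed to Go's template engine when alert annotations are expanded. A standalone sketch of consuming data of that shape (the template text and the concrete values are invented for illustration):

package main

import (
    "os"
    "text/template"
)

func main() {
    // Shaped like AlertTemplateData's return value; values are made up.
    data := struct {
        Labels         map[string]string
        ExternalLabels map[string]string
        ExternalURL    string
        Value          float64
    }{
        Labels:         map[string]string{"severity": "page"},
        ExternalLabels: map[string]string{"cluster": "eu-1"},
        ExternalURL:    "http://prometheus.example:9090",
        Value:          42,
    }
    tmpl := template.Must(template.New("alert").Parse(
        "severity={{.Labels.severity}} cluster={{.ExternalLabels.cluster}} value={{.Value}}\n"))
    if err := tmpl.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}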


@@ -87,7 +87,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "11",
 	},
 	{
@@ -98,7 +99,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "a",
 	},
 	{
@@ -108,7 +110,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "__value__", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "a",
 	},
 	{
@@ -118,7 +121,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "",
 	},
 	{
@@ -128,7 +132,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "",
 	},
 	{
@@ -137,7 +142,8 @@ func TestTemplateExpansion(t *testing.T) {
 			{
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "",
 		html:   true,
 	},
@@ -151,7 +157,8 @@ func TestTemplateExpansion(t *testing.T) {
 			}, {
 				Metric: labels.FromStrings(labels.MetricName, "metric", "instance", "a"),
 				Point:  promql.Point{T: 0, V: 11},
-			}},
+			},
+		},
 		output: "a:11: b:21: ",
 	},
 	{


@@ -36,9 +36,7 @@ import (
 	"go.uber.org/atomic"
 )
 
-var (
-	ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
-)
+var ErrUnsupported = errors.New("unsupported operation with WAL-only storage")
 
 // Default values for options.
 var (
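
ErrUnsupported above is a sentinel error: callers match it with errors.Is rather than by message. A standalone sketch of the pattern, using a local stand-in variable:

package main

import (
    "errors"
    "fmt"
)

// Local stand-in for the exported sentinel declared above.
var errUnsupported = errors.New("unsupported operation with WAL-only storage")

func querier() error {
    // Wrapping with %w keeps the sentinel matchable through the chain.
    return fmt.Errorf("open querier: %w", errUnsupported)
}

func main() {
    if err := querier(); errors.Is(err, errUnsupported) {
        fmt.Println("agent mode cannot serve queries:", err)
    }
}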


@@ -54,7 +54,7 @@ func (m seriesHashmap) Set(hash uint64, s *memSeries) {
 	m[hash] = append(seriesSet, s)
 }
 
-func (m seriesHashmap) Delete(hash uint64, ref uint64) {
+func (m seriesHashmap) Delete(hash, ref uint64) {
 	var rem []*memSeries
 	for _, s := range m[hash] {
 		if s.ref != ref {
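
Delete above removes one series from a hash bucket that may hold several colliding entries: it keeps every entry whose ref differs. A toy sketch of the same bucket handling, with made-up stand-in types (entry and hashmap are invented for illustration; dropping the bucket when it empties is an assumption of this sketch):

package main

import "fmt"

type entry struct{ ref uint64 }

// hashmap buckets colliding entries in a slice per hash value.
type hashmap map[uint64][]*entry

func (m hashmap) delete(hash, ref uint64) {
    var rem []*entry
    for _, e := range m[hash] {
        if e.ref != ref {
            rem = append(rem, e) // keep everything that does not match
        }
    }
    if len(rem) == 0 {
        delete(m, hash) // bucket emptied: remove the key entirely
    } else {
        m[hash] = rem
    }
}

func main() {
    m := hashmap{42: {{ref: 1}, {ref: 2}}}
    m.delete(42, 1)
    fmt.Println(len(m[42])) // 1
}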


@@ -191,9 +191,11 @@ type BlockMetaCompaction struct {
 	Failed bool `json:"failed,omitempty"`
 }
 
-const indexFilename = "index"
-const metaFilename = "meta.json"
-const metaVersion1 = 1
+const (
+	indexFilename = "index"
+	metaFilename  = "meta.json"
+	metaVersion1  = 1
+)
 
 func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
@@ -611,12 +613,12 @@ func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, er
 
 // Snapshot creates snapshot of the block into dir.
 func (pb *Block) Snapshot(dir string) error {
 	blockDir := filepath.Join(dir, pb.meta.ULID.String())
-	if err := os.MkdirAll(blockDir, 0777); err != nil {
+	if err := os.MkdirAll(blockDir, 0o777); err != nil {
 		return errors.Wrap(err, "create snapshot block dir")
 	}
 
 	chunksDir := chunkDir(blockDir)
-	if err := os.MkdirAll(chunksDir, 0777); err != nil {
+	if err := os.MkdirAll(chunksDir, 0o777); err != nil {
 		return errors.Wrap(err, "create snapshot chunk dir")
 	}
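
Hunks like the two above change only the spelling of the file mode: Go 1.13 introduced the 0o prefix for octal literals, and gofumpt rewrites the old bare-zero form. The values are identical, as a quick standalone check shows:

package main

import (
    "fmt"
    "os"
)

func main() {
    fmt.Println(0o777 == 0777, os.FileMode(0o777)) // true -rwxrwxrwx
    fmt.Println(0o666 == 438)                      // true: 6*64 + 6*8 + 6
}
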


@@ -185,7 +185,7 @@ func TestCorruptedChunk(t *testing.T) {
 			require.NoError(t, err)
 			require.Greater(t, len(files), 0, "No chunk created.")
 
-			f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
+			f, err := os.OpenFile(files[0], os.O_RDWR, 0o666)
 			require.NoError(t, err)
 
 			// Apply corruption function.
@@ -505,7 +505,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
 	compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil)
 	require.NoError(tb, err)
 
-	require.NoError(tb, os.MkdirAll(dir, 0777))
+	require.NoError(tb, os.MkdirAll(dir, 0o777))
 
 	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
 	// Because of this block intervals are always +1 than the total samples it includes.


@@ -97,9 +97,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
 	return cm.MinTime <= maxt && mint <= cm.MaxTime
 }
 
-var (
-	errInvalidSize = fmt.Errorf("invalid size")
-)
+var errInvalidSize = fmt.Errorf("invalid size")
 
 var castagnoliTable *crc32.Table
@@ -148,7 +146,7 @@ func newWriter(dir string, segmentSize int64) (*Writer, error) {
 		segmentSize = DefaultChunkSegmentSize
 	}
 
-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		return nil, err
 	}
 	dirFile, err := fileutil.OpenDir(dir)
@@ -224,7 +222,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all
 		return 0, nil, 0, errors.Wrap(err, "next sequence file")
 	}
 	ptmp := p + ".tmp"
-	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666)
+	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
 	if err != nil {
 		return 0, nil, 0, errors.Wrap(err, "open temp file")
 	}
@@ -266,7 +264,7 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all
 		return 0, nil, 0, errors.Wrap(err, "replace file")
 	}
 
-	f, err = os.OpenFile(p, os.O_WRONLY, 0666)
+	f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
 	if err != nil {
 		return 0, nil, 0, errors.Wrap(err, "open final file")
 	}
@@ -355,7 +353,7 @@ func (w *Writer) writeChunks(chks []Meta) error {
 		return nil
 	}
 
-	var seq = uint64(w.seq()) << 32
+	seq := uint64(w.seq()) << 32
 	for i := range chks {
 		chk := &chks[i]


@@ -42,11 +42,9 @@ const (
 	headChunksFormatV1 = 1
 )
 
-var (
-	// ErrChunkDiskMapperClosed returned by any method indicates
-	// that the ChunkDiskMapper was closed.
-	ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
-)
+// ErrChunkDiskMapperClosed returned by any method indicates
+// that the ChunkDiskMapper was closed.
+var ErrChunkDiskMapperClosed = errors.New("ChunkDiskMapper closed")
 
 const (
 	// MintMaxtSize is the size of the mint/maxt for head chunk file and chunks.
@@ -83,7 +81,6 @@ func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) {
 	sgmIndex = int(ref >> 32)
 	chkStart = int((ref << 32) >> 32)
-
 	return sgmIndex, chkStart
 }
 
 // CorruptionErr is an error that's returned when corruption is encountered.
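
Unpack above reverses a 32/32-bit packing: the segment file index sits in the upper half of the reference and the chunk's byte offset within that file in the lower half. A standalone round-trip sketch; pack is a hypothetical helper written for illustration:

package main

import "fmt"

// pack is an illustrative inverse of Unpack: segment index in the upper
// 32 bits, chunk start offset in the lower 32 bits.
func pack(sgmIndex, chkStart int) uint64 {
    return uint64(sgmIndex)<<32 | uint64(uint32(chkStart))
}

// unpack mirrors the method shown in the hunk above.
func unpack(ref uint64) (sgmIndex, chkStart int) {
    sgmIndex = int(ref >> 32)
    chkStart = int((ref << 32) >> 32)
    return sgmIndex, chkStart
}

func main() {
    ref := pack(3, 4096)
    s, c := unpack(ref)
    fmt.Println(s, c) // 3 4096
}
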
@@ -152,7 +149,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C
 		return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize)
 	}
 
-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		return nil, err
 	}
 	dirFile, err := fileutil.OpenDir(dir)


@@ -16,8 +16,6 @@
 package chunks
 
-var (
-	// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
-	// Windows needs pre-allocations while the other OS does not.
-	HeadChunkFilePreallocationSize int64
-)
+// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
+// Windows needs pre-allocations while the other OS does not.
+var HeadChunkFilePreallocationSize int64


@@ -379,7 +379,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
 
 	// Write an empty last file mimicking an abrupt shutdown on file creation.
 	emptyFileName := segmentFile(dir, lastFile+1)
-	f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0666)
+	f, err := os.OpenFile(emptyFileName, os.O_WRONLY|os.O_CREATE, 0o666)
 	require.NoError(t, err)
 	require.NoError(t, f.Sync())
 	stat, err := f.Stat()
@@ -409,7 +409,6 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
 		require.NoError(t, err)
 		require.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
 	}
-
 }
 
 func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {


@@ -13,8 +13,6 @@
 package chunks
 
-var (
-	// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
-	// Windows needs pre-allocation to m-map the file.
-	HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize
-)
+// HeadChunkFilePreallocationSize is the size to which the m-map file should be preallocated when a new file is cut.
+// Windows needs pre-allocation to m-map the file.
+var HeadChunkFilePreallocationSize int64 = MaxHeadChunkFileSize


@@ -564,7 +564,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe
 		return err
 	}
 
-	if err = os.MkdirAll(tmp, 0777); err != nil {
+	if err = os.MkdirAll(tmp, 0o777); err != nil {
 		return err
 	}


@@ -65,10 +65,8 @@ const (
 	lockfileCreatedCleanly = 1
 )
 
-var (
-	// ErrNotReady is returned if the underlying storage is not ready yet.
-	ErrNotReady = errors.New("TSDB not ready")
-)
+// ErrNotReady is returned if the underlying storage is not ready yet.
+var ErrNotReady = errors.New("TSDB not ready")
 
 // DefaultOptions used for the DB. They are sane for setups using
 // millisecond precision timestamps.
@@ -609,7 +607,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
 }
 
 func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) {
-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
 		return nil, err
 	}
 	if l == nil {
@@ -1642,7 +1640,7 @@ func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, err
 	return db.head.exemplars.ExemplarQuerier(ctx)
 }
 
-func rangeForTimestamp(t int64, width int64) (maxt int64) {
+func rangeForTimestamp(t, width int64) (maxt int64) {
 	return (t/width)*width + width
 }
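
rangeForTimestamp above computes the exclusive upper bound of the width-aligned window containing t, so blocks cover half-open ranges [maxt-width, maxt). A standalone worked example; the 2h width in milliseconds is assumed here for illustration:

package main

import "fmt"

// Copied from the hunk above.
func rangeForTimestamp(t, width int64) (maxt int64) {
    return (t/width)*width + width
}

func main() {
    const width = 7200000 // assumed 2h block range, in milliseconds
    fmt.Println(rangeForTimestamp(0, width))       // 7200000
    fmt.Println(rangeForTimestamp(7199999, width)) // 7200000: still the first window
    fmt.Println(rangeForTimestamp(7200000, width)) // 14400000: starts the next window
}
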


@@ -228,7 +228,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
 	{
 		walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal"))
 		require.NoError(t, err)
-		f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0666)
+		f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
 		require.NoError(t, err)
 		r := wal.NewReader(bufio.NewReader(f))
 		require.True(t, r.Next(), "reading the series record")
@@ -1245,7 +1245,6 @@ func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
 		require.NoError(t, db.Close())
 	}
-
 }
 
 func intersection(oldBlocks, actualBlocks []string) (intersection []string) {
@@ -1272,6 +1271,7 @@ type mockCompactorFailing struct {
 func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
 	return nil, nil
 }
+
 func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
 	if len(c.blocks) >= c.max {
 		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
@@ -1559,7 +1559,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample
 func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
 	// Create 10 blocks that does not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm
 	// will handle that.
-	var metas = make([]BlockMeta, 11)
+	metas := make([]BlockMeta, 11)
 	for i := 10; i >= 0; i-- {
 		metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
 	}
@@ -1781,7 +1781,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
 			require.NoError(t, os.RemoveAll(dir))
 		}()
 
-		require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
+		require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
 		w, err := wal.New(nil, nil, path.Join(dir, "wal"), false)
 		require.NoError(t, err)
@@ -1831,7 +1831,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
 		createBlock(t, dir, genSeries(1, 1, 1000, 6000))
 
-		require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0777))
+		require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
 		w, err := wal.New(nil, nil, path.Join(dir, "wal"), false)
 		require.NoError(t, err)
@@ -2663,7 +2663,6 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
 	for i, test := range tests {
 		t.Run(strconv.Itoa(i), func(t *testing.T) {
-
 			tempDir, err := ioutil.TempDir("", "test_chunk_writer")
 			require.NoError(t, err)
 			defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()
@@ -2899,7 +2898,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
 			expectedLoadedDirs[outDir] = struct{}{}
 
 			// Touch chunks dir in block.
-			require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0777))
+			require.NoError(t, os.MkdirAll(filepath.Join(dbDir, "chunks"), 0o777))
 			defer func() {
 				require.NoError(t, os.RemoveAll(filepath.Join(dbDir, "chunks")))
 			}()
@@ -3166,7 +3165,7 @@ func TestLockfileMetric(t *testing.T) {
 		// Test preconditions (file already exists + lockfile option)
 		lockfilePath := filepath.Join(absdir, "lock")
 		if c.fileAlreadyExists {
-			err = ioutil.WriteFile(lockfilePath, []byte{}, 0644)
+			err = ioutil.WriteFile(lockfilePath, []byte{}, 0o644)
 			require.NoError(t, err)
 		}
 		opts := DefaultOptions()


@@ -133,7 +133,6 @@ func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
 	dec := Decbuf{B: b[:len(b)-4]}
-
 	if castagnoliTable != nil {
 		if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp {
 			return Decbuf{E: ErrInvalidChecksum}
 		}
 	}


@@ -284,7 +284,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
 	// This math is essentially looking at nextIndex, where we would write the next exemplar to,
 	// and find the index in the old exemplar buffer that we should start migrating exemplars from.
 	// This way we don't migrate exemplars that would just be overwritten when migrating later exemplars.
-	var startIndex = (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
+	startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
 
 	for i := int64(0); i < count; i++ {
 		idx := (startIndex + i) % int64(len(oldBuffer))
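
The startIndex arithmetic above picks where migration starts in the old circular buffer so that only the newest count exemplars are copied into the resized one. A standalone worked example with made-up sizes:

package main

import "fmt"

func main() {
    oldBuffer := make([]int, 8)  // illustrative 8-slot circular buffer
    oldNextIndex := int64(5)     // next write position in the old buffer
    count := int64(3)            // how many exemplars the new buffer keeps

    // Same formula as the hunk above: (5 - 3 + 8) % 8 = 2.
    startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
    fmt.Println(startIndex) // 2: slots 2, 3, 4 hold the three newest exemplars
    for i := int64(0); i < count; i++ {
        fmt.Println((startIndex + i) % int64(len(oldBuffer))) // 2 3 4
    }
}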


@@ -448,7 +448,6 @@ func TestResize(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-
 			exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
 			require.NoError(t, err)
 			es := exs.(*CircularExemplarStorage)
@@ -456,7 +455,8 @@ func TestResize(t *testing.T) {
 			for i := 0; int64(i) < tc.startSize; i++ {
 				err = es.AddExemplar(labels.FromStrings("service", strconv.Itoa(i)), exemplar.Exemplar{
 					Value: float64(i),
-					Ts:    int64(i)})
+					Ts:    int64(i),
+				})
 				require.NoError(t, err)
 			}


@@ -27,7 +27,7 @@ import (
 // CopyDirs copies all directories, subdirectories and files recursively including the empty folders.
 // Source and destination must be full paths.
 func CopyDirs(src, dest string) error {
-	if err := os.MkdirAll(dest, 0777); err != nil {
+	if err := os.MkdirAll(dest, 0o777); err != nil {
 		return err
 	}
 	files, err := readDirs(src)
@@ -46,7 +46,7 @@ func CopyDirs(src, dest string) error {
 
 		// Empty directories are also created.
 		if stat.IsDir() {
-			if err := os.MkdirAll(dp, 0777); err != nil {
+			if err := os.MkdirAll(dp, 0o777); err != nil {
 				return err
 			}
 			continue
@@ -65,7 +65,7 @@ func copyFile(src, dest string) error {
 		return err
 	}
 
-	err = ioutil.WriteFile(dest, data, 0666)
+	err = ioutil.WriteFile(dest, data, 0o666)
 	if err != nil {
 		return err
 	}


@@ -29,7 +29,7 @@ type Releaser interface {
 // locking has failed. Neither this function nor the returned Releaser is
 // goroutine-safe.
 func Flock(fileName string) (r Releaser, existed bool, err error) {
-	if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
+	if err = os.MkdirAll(filepath.Dir(fileName), 0o755); err != nil {
 		return nil, false, err
 	}
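
A possible usage sketch for Flock as declared above, assuming the github.com/prometheus/prometheus/tsdb/fileutil import path and a go.mod that requires it; the lock path is invented:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/prometheus/tsdb/fileutil"
)

func main() {
    // Flock creates the parent directory, then takes an exclusive lock.
    releaser, existed, err := fileutil.Flock("/tmp/demo-data/lock")
    if err != nil {
        log.Fatal(err) // another process holds the lock, or mkdir failed
    }
    defer releaser.Release()
    fmt.Println("lock file already existed:", existed)
}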


@@ -24,7 +24,7 @@ func (l *plan9Lock) Release() error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0666)
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0o666)
 	if err != nil {
 		return nil, err
 	}


@@ -46,7 +46,7 @@ func (l *unixLock) set(lock bool) error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
 	if err != nil {
 		return nil, err
 	}


@@ -41,7 +41,7 @@ func (l *unixLock) set(lock bool) error {
 }
 
 func newLock(fileName string) (Releaser, error) {
-	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o666)
 	if err != nil {
 		return nil, err
 	}
