Merge branch 'main' into api-response-format-extension-point

Signed-off-by: Charles Korn <charles.korn@grafana.com>
Charles Korn 2023-03-09 12:06:26 +11:00
commit 38c1930f48
71 changed files with 2246 additions and 899 deletions


@ -17,6 +17,7 @@ jobs:
- uses: ./.github/promci/actions/setup_environment
- run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test ./tsdb/ -test.tsdb-isolation=false
- run: go test --tags=stringlabels ./...
- run: GOARCH=386 go test ./cmd/prometheus
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples
@ -140,14 +141,14 @@ jobs:
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: '<1.19'
go-version: 1.20.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@v3.4.0
with:
version: v1.50.1
version: v1.51.2
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'

.gitignore

@ -19,7 +19,7 @@ benchmark.txt
!/.promu.yml
!/.golangci.yml
/documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter
/documentation/examples/remote_storage/example_write_adapter/example_writer_adapter
/documentation/examples/remote_storage/example_write_adapter/example_write_adapter
npm_licenses.tar.bz2
/web/ui/static/react
@ -28,3 +28,6 @@ npm_licenses.tar.bz2
/.build
/**/node_modules
# Ignore parser debug
y.output


@ -78,3 +78,20 @@ GO111MODULE=on go mod tidy
```
You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
## Working with the PromQL parser
The PromQL parser grammar is located in `promql/parser/generated_parser.y` and it can be built using `make parser`.
The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc).
If you need to debug the parser, you can enable verbose output. After generating the parser, you
can modify `./promql/parser/generated_parser.y.go` manually.
```golang
// As of writing, this was somewhere around line 600.
var (
yyDebug = 0 // This can be a number from 0 to 5.
yyErrorVerbose = false // This can be set to true.
)
```
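For quick reference, a minimal sketch of exercising the parser once verbose output is enabled; the test below is hypothetical and not part of the repository:

```golang
package parser

import "testing"

// With yyDebug > 0 in generated_parser.y.go, any parse emits goyacc
// state-transition traces to stderr. Run with:
//   go test -v -run TestParserDebugSketch ./promql/parser/
func TestParserDebugSketch(t *testing.T) {
	if _, err := ParseExpr(`rate(http_requests_total{job="api"}[5m])`); err != nil {
		t.Fatal(err)
	}
}
```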


@ -78,6 +78,17 @@ assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh
# We only want to generate the parser when there are changes to the grammar.
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
ifeq (, $(shell which goyacc))
@echo "goyacc not installed so skipping"
@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
else
goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
endif
.PHONY: test
# If we only want to test Go code, we have to change the test target,
# which is called by all.


@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.50.1
GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@ -47,7 +47,7 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) |
| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
| v2.43 | 2023-03-08 | **searching for volunteer** |
| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.44 | 2023-04-19 | **searching for volunteer** |
If you are interested in volunteering, please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@ -33,6 +33,7 @@ import (
"syscall"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -47,7 +48,6 @@ import (
toolkit_web "github.com/prometheus/exporter-toolkit/web"
"go.uber.org/atomic"
"go.uber.org/automaxprocs/maxprocs"
"gopkg.in/alecthomas/kingpin.v2"
"k8s.io/klog"
klogv2 "k8s.io/klog/v2"
@ -468,6 +468,14 @@ func main() {
level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2)
}
if _, err := cfgFile.GetScrapeConfigs(); err != nil {
absPath, pathErr := filepath.Abs(cfg.configFile)
if pathErr != nil {
absPath = cfg.configFile
}
level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2)
}
if cfg.tsdb.EnableExemplarStorage {
if cfgFile.StorageConfig.ExemplarsConfig == nil {
cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
@ -730,7 +738,11 @@ func main() {
name: "scrape_sd",
reloader: func(cfg *config.Config) error {
c := make(map[string]discovery.Configs)
for _, v := range cfg.ScrapeConfigs {
scfgs, err := cfg.GetScrapeConfigs()
if err != nil {
return err
}
for _, v := range scfgs {
c[v.JobName] = v.ServiceDiscoveryConfigs
}
return discoveryManagerScrape.ApplyConfig(c)


@ -31,6 +31,7 @@ import (
"text/tabwriter"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/google/pprof/profile"
"github.com/prometheus/client_golang/api"
@ -41,7 +42,6 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
dto "github.com/prometheus/client_model/go"
@ -463,7 +463,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
}
}
for _, scfg := range cfg.ScrapeConfigs {
var scfgs []*config.ScrapeConfig
if checkSyntaxOnly {
scfgs = cfg.ScrapeConfigs
} else {
var err error
scfgs, err = cfg.GetScrapeConfigs()
if err != nil {
return nil, fmt.Errorf("error loading scrape configs: %w", err)
}
}
for _, scfg := range scfgs {
if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)


@ -47,9 +47,15 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
}
var scrapeConfig *config.ScrapeConfig
scfgs, err := cfg.GetScrapeConfigs()
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot load scrape configs", err)
return failureExitCode
}
jobs := []string{}
jobMatched := false
for _, v := range cfg.ScrapeConfigs {
for _, v := range scfgs {
jobs = append(jobs, v.JobName)
if v.JobName == sdJobName {
jobMatched = true


@ -216,12 +216,13 @@ var (
// Config is the top-level configuration for Prometheus's config files.
type Config struct {
GlobalConfig GlobalConfig `yaml:"global"`
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
StorageConfig StorageConfig `yaml:"storage,omitempty"`
TracingConfig TracingConfig `yaml:"tracing,omitempty"`
GlobalConfig GlobalConfig `yaml:"global"`
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"`
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
StorageConfig StorageConfig `yaml:"storage,omitempty"`
TracingConfig TracingConfig `yaml:"tracing,omitempty"`
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
@ -235,6 +236,9 @@ func (c *Config) SetDirectory(dir string) {
for i, file := range c.RuleFiles {
c.RuleFiles[i] = config.JoinDir(dir, file)
}
for i, file := range c.ScrapeConfigFiles {
c.ScrapeConfigFiles[i] = config.JoinDir(dir, file)
}
for _, c := range c.ScrapeConfigs {
c.SetDirectory(dir)
}
@ -254,6 +258,58 @@ func (c Config) String() string {
return string(b)
}
// GetScrapeConfigs returns the scrape configurations.
func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
// We do these checks for library users that would not call Validate in
// UnmarshalYAML.
if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
return nil, err
}
if _, ok := jobNames[scfg.JobName]; ok {
return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
}
jobNames[scfg.JobName] = "main config file"
scfgs[i] = scfg
}
for _, pat := range c.ScrapeConfigFiles {
fs, err := filepath.Glob(pat)
if err != nil {
// The only possible error is a bad pattern.
return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err)
}
for _, filename := range fs {
cfg := ScrapeConfigs{}
content, err := os.ReadFile(filename)
if err != nil {
return nil, fileErr(filename, err)
}
err = yaml.UnmarshalStrict(content, &cfg)
if err != nil {
return nil, fileErr(filename, err)
}
for _, scfg := range cfg.ScrapeConfigs {
if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
return nil, fileErr(filename, err)
}
if f, ok := jobNames[scfg.JobName]; ok {
return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f))
}
jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename))
scfg.SetDirectory(filepath.Dir(filename))
scfgs = append(scfgs, scfg)
}
}
}
return scfgs, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultConfig
@ -276,26 +332,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
return fmt.Errorf("invalid rule file path %q", rf)
}
}
for _, sf := range c.ScrapeConfigFiles {
if !patRulePath.MatchString(sf) {
return fmt.Errorf("invalid scrape config file path %q", sf)
}
}
// Do global overrides and validate unique names.
jobNames := map[string]struct{}{}
for _, scfg := range c.ScrapeConfigs {
if scfg == nil {
return errors.New("empty or null scrape config section")
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if scfg.ScrapeInterval == 0 {
scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval
}
if scfg.ScrapeTimeout > scfg.ScrapeInterval {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName)
}
if scfg.ScrapeTimeout == 0 {
if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval {
scfg.ScrapeTimeout = scfg.ScrapeInterval
} else {
scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout
}
if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
return err
}
if _, ok := jobNames[scfg.JobName]; ok {
@ -401,6 +449,10 @@ func (c *GlobalConfig) isZero() bool {
c.QueryLogFile == ""
}
type ScrapeConfigs struct {
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
}
// ScrapeConfig configures a scraping unit for Prometheus.
type ScrapeConfig struct {
// The job name to which the job label is set by default.
@ -494,6 +546,28 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error {
if c == nil {
return errors.New("empty or null scrape config section")
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if c.ScrapeInterval == 0 {
c.ScrapeInterval = defaultInterval
}
if c.ScrapeTimeout > c.ScrapeInterval {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
}
if c.ScrapeTimeout == 0 {
if defaultTimeout > c.ScrapeInterval {
c.ScrapeTimeout = c.ScrapeInterval
} else {
c.ScrapeTimeout = defaultTimeout
}
}
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
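As a review aid, a minimal sketch (not part of this change; values are illustrative) of the defaulting behaviour the new `Validate` method implements:

```golang
package config

import (
	"testing"
	"time"

	"github.com/prometheus/common/model"
)

func TestValidateDefaultsSketch(t *testing.T) {
	// Empty interval/timeout are filled from the (global) defaults.
	sc := &ScrapeConfig{JobName: "example"}
	if err := sc.Validate(model.Duration(15*time.Second), model.Duration(10*time.Second)); err != nil {
		t.Fatal(err)
	}
	// sc.ScrapeInterval is now 15s and sc.ScrapeTimeout 10s.

	// An explicit timeout greater than the (explicit) interval is rejected.
	bad := &ScrapeConfig{
		JobName:        "bad",
		ScrapeInterval: model.Duration(5 * time.Second),
		ScrapeTimeout:  model.Duration(30 * time.Second),
	}
	if err := bad.Validate(model.Duration(15*time.Second), model.Duration(10*time.Second)); err == nil {
		t.Fatal("expected scrape timeout greater than scrape interval error")
	}
}
```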
@ -936,3 +1010,15 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro
// Thus we just do its validation here.
return c.HTTPClientConfig.Validate()
}
func filePath(filename string) string {
absPath, err := filepath.Abs(filename)
if err != nil {
return filename
}
return absPath
}
func fileErr(filename string, err error) error {
return fmt.Errorf("%q: %w", filePath(filename), err)
}
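Putting the pieces together, a hedged usage sketch of the new API; the config path `prometheus.yml` is hypothetical:

```golang
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// Load the main file, then expand scrape_config_files globs, validate
	// every job, and reject duplicate job names across all files.
	cfg, err := config.LoadFile("prometheus.yml", false, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	scfgs, err := cfg.GetScrapeConfigs()
	if err != nil {
		panic(err)
	}
	for _, sc := range scfgs {
		fmt.Println(sc.JobName)
	}
}
```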


@ -1693,6 +1693,10 @@ var expectedErrors = []struct {
filename: "ovhcloud_bad_service.bad.yml",
errMsg: "unknown service: fakeservice",
},
{
filename: "scrape_config_files_glob.bad.yml",
errMsg: `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
},
}
func TestBadConfigs(t *testing.T) {
@ -1779,6 +1783,156 @@ func TestEmptyGlobalBlock(t *testing.T) {
require.Equal(t, exp, *c)
}
func TestGetScrapeConfigs(t *testing.T) {
sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig {
return &ScrapeConfig{
JobName: jobName,
HonorTimestamps: true,
ScrapeInterval: scrapeInterval,
ScrapeTimeout: scrapeTimeout,
MetricsPath: "/metrics",
Scheme: "http",
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{
model.AddressLabel: "localhost:8080",
},
},
Source: "0",
},
},
},
}
}
testCases := []struct {
name string
configFile string
expectedResult []*ScrapeConfig
expectedError string
}{
{
name: "An included config file should be a valid global config.",
configFile: "testdata/scrape_config_files.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))},
},
{
name: "A global config that only includes a scrape config file.",
configFile: "testdata/scrape_config_files_only.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))},
},
{
name: "A global config that combines scrape config files and scrape configs.",
configFile: "testdata/scrape_config_files_combined.good.yml",
expectedResult: []*ScrapeConfig{
sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second)),
sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second)),
sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second)),
},
},
{
name: "A global config that includes a scrape config file with globs.",
configFile: "testdata/scrape_config_files_glob.good.yml",
expectedResult: []*ScrapeConfig{
{
JobName: "prometheus",
HonorTimestamps: true,
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/scrape_configs/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
ServiceDiscoveryConfigs: discovery.Configs{
discovery.StaticConfig{
{
Targets: []model.LabelSet{
{model.AddressLabel: "localhost:8080"},
},
Source: "0",
},
},
},
},
{
JobName: "node",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
},
FollowRedirects: true,
EnableHTTP2: true,
},
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
ServiceDiscoveryConfigs: discovery.Configs{
&vultr.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{
Authorization: &config.Authorization{
Type: "Bearer",
Credentials: "abcdef",
},
FollowRedirects: true,
EnableHTTP2: true,
},
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
},
},
},
},
},
{
name: "A global config that includes the same scrape config file twice.",
configFile: "testdata/scrape_config_files_double_import.bad.yml",
expectedError: `found multiple scrape configs with job name "prometheus"`,
},
{
name: "A global config that includes a scrape config identical to one in the main file.",
configFile: "testdata/scrape_config_files_duplicate.bad.yml",
expectedError: `found multiple scrape configs with job name "prometheus"`,
},
{
name: "A global config that includes a scrape config file with errors.",
configFile: "testdata/scrape_config_files_global.bad.yml",
expectedError: `scrape timeout greater than scrape interval for scrape config with job name "prometheus"`,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger())
require.NoError(t, err)
scfgs, err := c.GetScrapeConfigs()
if len(tc.expectedError) > 0 {
require.ErrorContains(t, err, tc.expectedError)
}
require.Equal(t, tc.expectedResult, scfgs)
})
}
}
func kubernetesSDHostURL() config.URL {
tURL, _ := url.Parse("https://localhost:1234")
return config.URL{URL: tURL}


@ -0,0 +1,6 @@
scrape_configs:
- job_name: prometheus
scrape_interval: 10s
scrape_timeout: 20s
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,4 @@
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,4 @@
scrape_configs:
- job_name: alertmanager
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,7 @@
scrape_config_files:
- scrape_config_files.good.yml
- scrape_config_files2.good.yml
scrape_configs:
- job_name: node
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,3 @@
scrape_config_files:
- scrape_config_files.good.yml
- scrape_config_files.good.yml


@ -0,0 +1,6 @@
scrape_config_files:
- scrape_config_files.good.yml
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,6 @@
scrape_config_files:
- scrape_configs/*/*
scrape_configs:
- job_name: node
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,2 @@
scrape_config_files:
- scrape_configs/*.yml


@ -0,0 +1,2 @@
scrape_config_files:
- scrape_config_files.bad.yml


@ -0,0 +1,11 @@
global:
scrape_interval: 15s
scrape_config_files:
- scrape_config_files.good.yml
- scrape_config_files.good.yml
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']


@ -0,0 +1,2 @@
scrape_config_files:
- scrape_config_files.good.yml


@ -0,0 +1,7 @@
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file


@ -0,0 +1,9 @@
scrape_configs:
- job_name: node
scrape_interval: 15s
tls_config:
cert_file: ../valid_cert_file
key_file: ../valid_key_file
vultr_sd_configs:
- authorization:
credentials: abcdef


@ -78,6 +78,11 @@ global:
rule_files:
[ - <filepath_glob> ... ]
# Scrape config files specify a list of globs. Scrape configs are read from
# all matching files and appended to the list of scrape configs.
scrape_config_files:
[ - <filepath_glob> ... ]
# A list of scrape configurations.
scrape_configs:
[ - <scrape_config> ... ]
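The globs are expanded with Go's `path/filepath.Glob`, as in the `GetScrapeConfigs` implementation above. A minimal sketch with hypothetical paths:

```golang
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// "scrape_configs/*.yml" matches files in that directory only; a
	// pattern such as "scrape_configs/*/*" is rejected by the config's
	// path validation before globbing happens.
	matches, err := filepath.Glob("scrape_configs/*.yml")
	if err != nil {
		panic(err) // the only possible error is a malformed pattern
	}
	fmt.Println(matches)
}
```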
@ -208,6 +213,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -431,6 +442,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -513,6 +530,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -618,6 +641,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -699,6 +728,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -751,6 +786,12 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -920,6 +961,12 @@ host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1121,6 +1168,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1390,6 +1443,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1601,6 +1660,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1689,6 +1754,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1766,6 +1837,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -1985,6 +2062,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2065,6 +2148,12 @@ server: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2186,6 +2275,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2261,6 +2356,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2363,6 +2464,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2455,6 +2562,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2635,6 +2748,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2754,6 +2873,12 @@ tags_filter:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2824,6 +2949,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -2900,6 +3031,12 @@ oauth2:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -3096,6 +3233,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -3308,6 +3451,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -3415,6 +3564,12 @@ tls_config:
# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, NO_PROXY, and their lowercase variants)
[ proxy_from_environment: <bool> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]


@ -9,7 +9,7 @@ require (
github.com/influxdata/influxdb v1.11.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/common v0.37.0
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.2
gopkg.in/alecthomas/kingpin.v2 v2.2.6
)


@ -228,8 +228,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

go.mod

@ -6,11 +6,11 @@ require (
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/adal v0.9.22
github.com/alecthomas/kingpin/v2 v2.3.2
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
github.com/aws/aws-sdk-go v1.44.187
github.com/aws/aws-sdk-go v1.44.207
github.com/cespare/xxhash/v2 v2.2.0
github.com/dennwc/varint v1.0.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.95.0
github.com/docker/docker v20.10.23+incompatible
github.com/edsrzf/mmap-go v1.1.0
@ -43,33 +43,33 @@ require (
github.com/prometheus/alertmanager v0.25.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.39.0
github.com/prometheus/common v0.42.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.8.2
github.com/prometheus/exporter-toolkit v0.9.1
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/stretchr/testify v1.8.1
github.com/stretchr/testify v1.8.2
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0
go.opentelemetry.io/otel v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2
go.opentelemetry.io/otel/sdk v1.11.2
go.opentelemetry.io/otel/trace v1.11.2
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0
go.opentelemetry.io/otel v1.14.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0
go.opentelemetry.io/otel/sdk v1.14.0
go.opentelemetry.io/otel/trace v1.14.0
go.uber.org/atomic v1.10.0
go.uber.org/automaxprocs v1.5.1
go.uber.org/goleak v1.2.0
golang.org/x/net v0.5.0
golang.org/x/oauth2 v0.4.0
go.uber.org/goleak v1.2.1
golang.org/x/net v0.8.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.4.0
golang.org/x/sys v0.6.0
golang.org/x/time v0.3.0
golang.org/x/tools v0.5.0
golang.org/x/tools v0.6.0
google.golang.org/api v0.108.0
google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2
google.golang.org/grpc v1.52.1
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
@ -83,13 +83,15 @@ require (
require (
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
)
require (
cloud.google.com/go/compute v1.14.0 // indirect
cloud.google.com/go/compute v1.15.1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
@ -102,7 +104,7 @@ require (
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
@ -164,19 +166,19 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.mongodb.org/mongo-driver v1.11.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect
go.opentelemetry.io/otel/metric v0.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
go.opentelemetry.io/otel/metric v0.36.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.6 // indirect

go.sum

@ -12,15 +12,15 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@ -73,6 +73,8 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -97,8 +99,8 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA=
github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.207 h1:7O0AMKxTm+/GUx6zw+3dqc+fD3tTzv8xaZPYo+ywRwE=
github.com/aws/aws-sdk-go v1.44.207/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -129,14 +131,15 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk=
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA=
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@ -148,8 +151,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg=
github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@ -509,8 +510,8 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@ -666,28 +667,30 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM=
github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw=
github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@ -741,8 +744,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -756,6 +760,8 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -780,24 +786,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U=
go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 h1:Us8tbCmuN16zAnK5TC69AtODLycKbwnskQzaB6DfFhc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2/go.mod h1:GZWSQQky8AgdJj50r1KJm8oiQiIPaAX7uZCFQX9GzC8=
go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU=
go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU=
go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0 h1:vFEBG7SieZJzvnRWQ81jxpuEqe6J8Ex+hgc9CqOTzHc=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0/go.mod h1:9rgTcOKdIhDOC0IcAu8a+R+FChqSUBihKpM1lVNi6T0=
go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4=
go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q=
go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk=
go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
@ -808,8 +814,8 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
@ -831,8 +837,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -857,7 +863,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@ -870,8 +875,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -919,8 +924,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -928,8 +933,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1011,13 +1016,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1027,8 +1032,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1090,8 +1095,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1178,8 +1183,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.52.1 h1:2NpOPk5g5Xtb0qebIEs7hNIa++PdtZLo2AQUpc1YnSU=
google.golang.org/grpc v1.52.1/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View file

@ -11,16 +11,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !stringlabels
package labels
import (
"bytes"
"encoding/json"
"sort"
"strconv"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
"golang.org/x/exp/slices"
)
// Well-known label names used by Prometheus components.
@ -358,7 +360,7 @@ func EmptyLabels() Labels {
func New(ls ...Label) Labels {
set := make(Labels, 0, len(ls))
set = append(set, ls...)
sort.Sort(set)
slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name })
return set
}
@ -382,7 +384,7 @@ func FromStrings(ss ...string) Labels {
res = append(res, Label{Name: ss[i], Value: ss[i+1]})
}
sort.Sort(res)
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
return res
}
@ -562,7 +564,7 @@ Outer:
}
if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it.
res = append(res, b.add...)
sort.Sort(res)
slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name })
}
return res
}
@ -589,7 +591,7 @@ func (b *ScratchBuilder) Add(name, value string) {
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
sort.Sort(b.add)
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.

View file

@ -0,0 +1,788 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build stringlabels
package labels
import (
"bytes"
"encoding/json"
"reflect"
"strconv"
"unsafe"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model"
"golang.org/x/exp/slices"
)
// Well-known label names used by Prometheus components.
const (
MetricName = "__name__"
AlertName = "alertname"
BucketLabel = "le"
InstanceName = "instance"
)
var seps = []byte{'\xff'}
// Label is a key/value pair of strings.
type Label struct {
Name, Value string
}
// Labels is implemented by a single flat string holding name/value pairs.
// Each name and value is preceded by its length in varint encoding.
// Names are in order.
type Labels struct {
data string
}
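// For illustration: the single-label set {job="node"} is stored as the
// string "\x03job\x04node", a varint length (3) before "job" and a varint
// length (4) before "node". A length needs more than one byte only once a
// name or value exceeds 127 bytes.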
type labelSlice []Label
func (ls labelSlice) Len() int { return len(ls) }
func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
func decodeSize(data string, index int) (int, int) {
var size int
for shift := uint(0); ; shift += 7 {
// Just panic if we go off the end of data, since all Labels strings are constructed internally and
// malformed data indicates a bug or memory corruption.
b := data[index]
index++
size |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
return size, index
}
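// For example, decodeSize("\x05hello", 0) returns (5, 1): the decoded size
// and the index just past the varint. A two-byte varint such as "\x83\x01"
// decodes to 3|(1<<7) = 131.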
func decodeString(data string, index int) (string, int) {
var size int
size, index = decodeSize(data, index)
return data[index : index+size], index + size
}
func (ls Labels) String() string {
var b bytes.Buffer
b.WriteByte('{')
for i := 0; i < len(ls.data); {
if i > 0 {
b.WriteByte(',')
b.WriteByte(' ')
}
var name, value string
name, i = decodeString(ls.data, i)
value, i = decodeString(ls.data, i)
b.WriteString(name)
b.WriteByte('=')
b.WriteString(strconv.Quote(value))
}
b.WriteByte('}')
return b.String()
}
// Bytes returns ls as a byte slice.
// It uses non-printing characters and so should not be used for printing.
func (ls Labels) Bytes(buf []byte) []byte {
if cap(buf) < len(ls.data) {
buf = make([]byte, len(ls.data))
} else {
buf = buf[:len(ls.data)]
}
copy(buf, ls.data)
return buf
}
// MarshalJSON implements json.Marshaler.
func (ls Labels) MarshalJSON() ([]byte, error) {
return json.Marshal(ls.Map())
}
// UnmarshalJSON implements json.Unmarshaler.
func (ls *Labels) UnmarshalJSON(b []byte) error {
var m map[string]string
if err := json.Unmarshal(b, &m); err != nil {
return err
}
*ls = FromMap(m)
return nil
}
// MarshalYAML implements yaml.Marshaler.
func (ls Labels) MarshalYAML() (interface{}, error) {
return ls.Map(), nil
}
// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
func (ls Labels) IsZero() bool {
return len(ls.data) == 0
}
// UnmarshalYAML implements yaml.Unmarshaler.
func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
var m map[string]string
if err := unmarshal(&m); err != nil {
return err
}
*ls = FromMap(m)
return nil
}
// MatchLabels returns a subset of Labels based on the 'on' boolean:
// if 'on' is true it returns the labels matching the provided names;
// otherwise it returns the non-matching labels, minus the metric name.
// TODO: This is only used in printing an error message.
func (ls Labels) MatchLabels(on bool, names ...string) Labels {
b := NewBuilder(ls)
if on {
b.Keep(names...)
} else {
b.Del(MetricName)
b.Del(names...)
}
return b.Labels(EmptyLabels())
}
// Hash returns a hash value for the label set.
// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
func (ls Labels) Hash() uint64 {
return xxhash.Sum64(yoloBytes(ls.data))
}
// HashForLabels returns a hash value for the labels matching the provided names.
// 'names' have to be sorted in ascending order.
func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
b = b[:0]
j := 0
for i := 0; i < len(ls.data); {
var name, value string
name, i = decodeString(ls.data, i)
value, i = decodeString(ls.data, i)
for j < len(names) && names[j] < name {
j++
}
if j == len(names) {
break
}
if name == names[j] {
b = append(b, name...)
b = append(b, seps[0])
b = append(b, value...)
b = append(b, seps[0])
}
}
return xxhash.Sum64(b), b
}
// HashWithoutLabels returns a hash value for all labels except those matching
// the provided names.
// 'names' have to be sorted in ascending order.
func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
b = b[:0]
j := 0
for i := 0; i < len(ls.data); {
var name, value string
name, i = decodeString(ls.data, i)
value, i = decodeString(ls.data, i)
for j < len(names) && names[j] < name {
j++
}
if name == MetricName || (j < len(names) && name == names[j]) {
continue
}
b = append(b, name...)
b = append(b, seps[0])
b = append(b, value...)
b = append(b, seps[0])
}
return xxhash.Sum64(b), b
}
// BytesWithLabels is just as Bytes(), but only for labels matching names.
// 'names' have to be sorted in ascending order.
func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
b := buf[:0]
j := 0
for pos := 0; pos < len(ls.data); {
lName, newPos := decodeString(ls.data, pos)
_, newPos = decodeString(ls.data, newPos)
for j < len(names) && names[j] < lName {
j++
}
if j == len(names) {
break
}
if lName == names[j] {
b = append(b, ls.data[pos:newPos]...)
}
pos = newPos
}
return b
}
// BytesWithoutLabels is just as Bytes(), but only for labels not matching names.
// 'names' have to be sorted in ascending order.
func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
b := buf[:0]
j := 0
for pos := 0; pos < len(ls.data); {
lName, newPos := decodeString(ls.data, pos)
_, newPos = decodeString(ls.data, newPos)
for j < len(names) && names[j] < lName {
j++
}
if j == len(names) || lName != names[j] {
b = append(b, ls.data[pos:newPos]...)
}
pos = newPos
}
return b
}
// Copy returns a copy of the labels.
func (ls Labels) Copy() Labels {
buf := append([]byte{}, ls.data...)
return Labels{data: yoloString(buf)}
}
// Get returns the value for the label with the given name.
// Returns an empty string if the label doesn't exist.
func (ls Labels) Get(name string) string {
for i := 0; i < len(ls.data); {
var lName, lValue string
lName, i = decodeString(ls.data, i)
lValue, i = decodeString(ls.data, i)
if lName == name {
return lValue
}
}
return ""
}
// Has returns true if the label with the given name is present.
func (ls Labels) Has(name string) bool {
for i := 0; i < len(ls.data); {
var lName string
lName, i = decodeString(ls.data, i)
_, i = decodeString(ls.data, i)
if lName == name {
return true
}
}
return false
}
// HasDuplicateLabelNames returns whether ls has duplicate label names.
// It assumes that the labelset is sorted.
func (ls Labels) HasDuplicateLabelNames() (string, bool) {
var lName, prevName string
for i := 0; i < len(ls.data); {
lName, i = decodeString(ls.data, i)
_, i = decodeString(ls.data, i)
if lName == prevName {
return lName, true
}
prevName = lName
}
return "", false
}
// WithoutEmpty returns the labelset without empty labels.
// May return the same labelset.
func (ls Labels) WithoutEmpty() Labels {
for pos := 0; pos < len(ls.data); {
_, newPos := decodeString(ls.data, pos)
lValue, newPos := decodeString(ls.data, newPos)
if lValue != "" {
pos = newPos
continue
}
// Do not copy the slice until it's necessary.
// TODO: could optimise the case where all blanks are at the end.
// Note: we size the new buffer on the assumption there is exactly one blank value.
buf := make([]byte, pos, pos+(len(ls.data)-newPos))
copy(buf, ls.data[:pos]) // copy the initial non-blank labels
pos = newPos // move past the first blank value
for pos < len(ls.data) {
var newPos int
_, newPos = decodeString(ls.data, pos)
lValue, newPos = decodeString(ls.data, newPos)
if lValue != "" {
buf = append(buf, ls.data[pos:newPos]...)
}
pos = newPos
}
return Labels{data: yoloString(buf)}
}
return ls
}
// IsValid checks whether the metric name and all label names and values are valid.
func (ls Labels) IsValid() bool {
err := ls.Validate(func(l Label) error {
if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) {
return strconv.ErrSyntax
}
if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
return strconv.ErrSyntax
}
return nil
})
return err == nil
}
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
return ls.data == o.data
}
// Map returns a string map of the labels.
func (ls Labels) Map() map[string]string {
m := make(map[string]string, len(ls.data)/10)
for i := 0; i < len(ls.data); {
var lName, lValue string
lName, i = decodeString(ls.data, i)
lValue, i = decodeString(ls.data, i)
m[lName] = lValue
}
return m
}
// EmptyLabels returns an empty Labels value, for convenience.
func EmptyLabels() Labels {
return Labels{}
}
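// yoloString and yoloBytes convert between string and []byte without
// copying. This is safe only because the converted buffers are never
// mutated afterwards.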
func yoloString(b []byte) string {
return *((*string)(unsafe.Pointer(&b)))
}
func yoloBytes(s string) (b []byte) {
*(*string)(unsafe.Pointer(&b)) = s
(*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s)
return
}
// New returns a sorted Labels from the given labels.
// The caller has to guarantee that all label names are unique.
func New(ls ...Label) Labels {
slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name })
size := labelsSize(ls)
buf := make([]byte, size)
marshalLabelsToSizedBuffer(ls, buf)
return Labels{data: yoloString(buf)}
}
// FromMap returns new sorted Labels from the given map.
func FromMap(m map[string]string) Labels {
l := make([]Label, 0, len(m))
for k, v := range m {
l = append(l, Label{Name: k, Value: v})
}
return New(l...)
}
// FromStrings creates new labels from pairs of strings.
func FromStrings(ss ...string) Labels {
if len(ss)%2 != 0 {
panic("invalid number of strings")
}
ls := make([]Label, 0, len(ss)/2)
for i := 0; i < len(ss); i += 2 {
ls = append(ls, Label{Name: ss[i], Value: ss[i+1]})
}
slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name })
return New(ls...)
}
// Compare compares the two label sets.
// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
// TODO: replace with Less function - Compare is never needed.
// TODO: just compare the underlying strings when we don't need alphanumeric sorting.
func Compare(a, b Labels) int {
l := len(a.data)
if len(b.data) < l {
l = len(b.data)
}
ia, ib := 0, 0
for ia < l {
var aName, bName string
aName, ia = decodeString(a.data, ia)
bName, ib = decodeString(b.data, ib)
if aName != bName {
if aName < bName {
return -1
}
return 1
}
var aValue, bValue string
aValue, ia = decodeString(a.data, ia)
bValue, ib = decodeString(b.data, ib)
if aValue != bValue {
if aValue < bValue {
return -1
}
return 1
}
}
// If all labels so far were in common, the set with fewer labels comes first.
return len(a.data) - len(b.data)
}
// CopyFrom replaces the contents of ls with the labels from b. The underlying data is an immutable string, so this is a simple assignment.
func (ls *Labels) CopyFrom(b Labels) {
ls.data = b.data // strings are immutable
}
// IsEmpty returns true if ls represents an empty set of labels.
func (ls Labels) IsEmpty() bool {
return len(ls.data) == 0
}
// Len returns the number of labels; it is relatively slow.
func (ls Labels) Len() int {
count := 0
for i := 0; i < len(ls.data); {
var size int
size, i = decodeSize(ls.data, i)
i += size
size, i = decodeSize(ls.data, i)
i += size
count++
}
return count
}
// Range calls f on each label.
func (ls Labels) Range(f func(l Label)) {
for i := 0; i < len(ls.data); {
var lName, lValue string
lName, i = decodeString(ls.data, i)
lValue, i = decodeString(ls.data, i)
f(Label{Name: lName, Value: lValue})
}
}
// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration.
func (ls Labels) Validate(f func(l Label) error) error {
for i := 0; i < len(ls.data); {
var lName, lValue string
lName, i = decodeString(ls.data, i)
lValue, i = decodeString(ls.data, i)
err := f(Label{Name: lName, Value: lValue})
if err != nil {
return err
}
}
return nil
}
// InternStrings calls intern on every string value inside ls, replacing them with what it returns.
func (ls *Labels) InternStrings(intern func(string) string) {
ls.data = intern(ls.data)
}
// ReleaseStrings calls release on every string value inside ls.
func (ls Labels) ReleaseStrings(release func(string)) {
release(ls.data)
}
// Builder allows modifying Labels.
type Builder struct {
base Labels
del []string
add []Label
}
// NewBuilder returns a new LabelsBuilder.
func NewBuilder(base Labels) *Builder {
b := &Builder{
del: make([]string, 0, 5),
add: make([]Label, 0, 5),
}
b.Reset(base)
return b
}
// Reset clears all current state for the builder.
func (b *Builder) Reset(base Labels) {
b.base = base
b.del = b.del[:0]
b.add = b.add[:0]
for i := 0; i < len(base.data); {
var lName, lValue string
lName, i = decodeString(base.data, i)
lValue, i = decodeString(base.data, i)
if lValue == "" {
b.del = append(b.del, lName)
}
}
}
// Del deletes the labels of the given names.
func (b *Builder) Del(ns ...string) *Builder {
for _, n := range ns {
for i, a := range b.add {
if a.Name == n {
b.add = append(b.add[:i], b.add[i+1:]...)
}
}
b.del = append(b.del, n)
}
return b
}
// Keep removes all labels from the base except those with the given names.
func (b *Builder) Keep(ns ...string) *Builder {
Outer:
for i := 0; i < len(b.base.data); {
var lName string
lName, i = decodeString(b.base.data, i)
_, i = decodeString(b.base.data, i)
for _, n := range ns {
if lName == n {
continue Outer
}
}
b.del = append(b.del, lName)
}
return b
}
// Set the name/value pair as a label. A value of "" means delete that label.
func (b *Builder) Set(n, v string) *Builder {
if v == "" {
// Empty labels are the same as missing labels.
return b.Del(n)
}
for i, a := range b.add {
if a.Name == n {
b.add[i].Value = v
return b
}
}
b.add = append(b.add, Label{Name: n, Value: v})
return b
}
// Labels returns the labels from the builder.
// If no modifications were made, the original labels are returned.
// The res argument is not currently used by this implementation; see the TODO below about reusing its buffer.
func (b *Builder) Labels(res Labels) Labels {
if len(b.del) == 0 && len(b.add) == 0 {
return b.base
}
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
slices.Sort(b.del)
a, d := 0, 0
bufSize := len(b.base.data) + labelsSize(b.add)
buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res.
for pos := 0; pos < len(b.base.data); {
oldPos := pos
var lName string
lName, pos = decodeString(b.base.data, pos)
_, pos = decodeString(b.base.data, pos)
for d < len(b.del) && b.del[d] < lName {
d++
}
if d < len(b.del) && b.del[d] == lName {
continue // This label has been deleted.
}
for ; a < len(b.add) && b.add[a].Name < lName; a++ {
buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set.
}
if a < len(b.add) && b.add[a].Name == lName {
buf = appendLabelTo(buf, &b.add[a])
a++
continue // This label has been replaced.
}
buf = append(buf, b.base.data[oldPos:pos]...)
}
// We have come to the end of the base set; add any remaining labels.
for ; a < len(b.add); a++ {
buf = appendLabelTo(buf, &b.add[a])
}
return Labels{data: yoloString(buf)}
}
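// marshalLabelsToSizedBuffer encodes lbls into the tail of data, writing
// the labels back to front, and returns the total number of bytes written.
// data must already be sized via labelsSize.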
func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int {
i := len(data)
for index := len(lbls) - 1; index >= 0; index-- {
size := marshalLabelToSizedBuffer(&lbls[index], data[:i])
i -= size
}
return len(data) - i
}
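// marshalLabelToSizedBuffer writes one label into the tail of data in
// reverse order (value bytes, value length, name bytes, name length), so
// that a forward read sees a length-prefixed name followed by a
// length-prefixed value.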
func marshalLabelToSizedBuffer(m *Label, data []byte) int {
i := len(data)
i -= len(m.Value)
copy(data[i:], m.Value)
i = encodeSize(data, i, len(m.Value))
i -= len(m.Name)
copy(data[i:], m.Name)
i = encodeSize(data, i, len(m.Name))
return len(data) - i
}
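// sizeVarint returns the number of bytes needed to varint-encode x, e.g.
// sizeVarint(127) == 1, sizeVarint(128) == 2 and sizeVarint(1<<14) == 3.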
func sizeVarint(x uint64) (n int) {
// Most common case first
if x < 1<<7 {
return 1
}
if x >= 1<<56 {
return 9
}
if x >= 1<<28 {
x >>= 28
n = 4
}
if x >= 1<<14 {
x >>= 14
n += 2
}
if x >= 1<<7 {
n++
}
return n + 1
}
func encodeVarint(data []byte, offset int, v uint64) int {
offset -= sizeVarint(v)
base := offset
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return base
}
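// For example, encodeVarint(data, 10, 300) writes the bytes {0xac, 0x02}
// at data[8:10] and returns the new offset 8.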
// Special code for the common case that a size is less than 128
func encodeSize(data []byte, offset, v int) int {
if v < 1<<7 {
offset--
data[offset] = uint8(v)
return offset
}
return encodeVarint(data, offset, uint64(v))
}
func labelsSize(lbls []Label) (n int) {
// We just encode name/value/name/value; there are no field tags or message-level length bytes, only each string's own varint length prefix.
for _, e := range lbls {
n += labelSize(&e)
}
return n
}
func labelSize(m *Label) (n int) {
// strings are encoded as length followed by contents.
l := len(m.Name)
n += l + sizeVarint(uint64(l))
l = len(m.Value)
n += l + sizeVarint(uint64(l))
return n
}
func appendLabelTo(buf []byte, m *Label) []byte {
size := labelSize(m)
sizeRequired := len(buf) + size
if cap(buf) >= sizeRequired {
buf = buf[:sizeRequired]
} else {
bufSize := cap(buf)
// Double size of buffer each time it needs to grow, to amortise copying cost.
for bufSize < sizeRequired {
bufSize = bufSize*2 + 1
}
newBuf := make([]byte, sizeRequired, bufSize)
copy(newBuf, buf)
buf = newBuf
}
marshalLabelToSizedBuffer(m, buf)
return buf
}
// ScratchBuilder allows efficient construction of a Labels from scratch.
type ScratchBuilder struct {
add []Label
output Labels
overwriteBuffer []byte
}
// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries.
func NewScratchBuilder(n int) ScratchBuilder {
return ScratchBuilder{add: make([]Label, 0, n)}
}
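// A typical use of ScratchBuilder, as a sketch:
//
//  b := NewScratchBuilder(2)
//  b.Add("__name__", "up")
//  b.Add("job", "node")
//  b.Sort()
//  lbls := b.Labels()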
func (b *ScratchBuilder) Reset() {
b.add = b.add[:0]
b.output = EmptyLabels()
}
// Add a name/value pair.
// Note if you Add the same name twice you will get a duplicate label, which is invalid.
func (b *ScratchBuilder) Add(name, value string) {
b.add = append(b.add, Label{Name: name, Value: value})
}
// Sort the labels added so far by name.
func (b *ScratchBuilder) Sort() {
slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name })
}
// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
func (b *ScratchBuilder) Assign(l Labels) {
b.output = l
}
// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect.
// Note: if you want them sorted, call Sort() first.
func (b *ScratchBuilder) Labels() Labels {
if b.output.IsEmpty() {
size := labelsSize(b.add)
buf := make([]byte, size)
marshalLabelsToSizedBuffer(b.add, buf)
b.output = Labels{data: yoloString(buf)}
}
return b.output
}
// Write the newly-built Labels out to ls, reusing an internal buffer.
// Callers must ensure that there are no other references to ls.
func (b *ScratchBuilder) Overwrite(ls *Labels) {
size := labelsSize(b.add)
if size <= cap(b.overwriteBuffer) {
b.overwriteBuffer = b.overwriteBuffer[:size]
} else {
b.overwriteBuffer = make([]byte, size)
}
marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer)
ls.data = yoloString(b.overwriteBuffer)
}

View file

@ -696,6 +696,50 @@ func BenchmarkLabels_Hash(b *testing.B) {
}
}
func BenchmarkBuilder(b *testing.B) {
m := []Label{
{"job", "node"},
{"instance", "123.123.1.211:9090"},
{"path", "/api/v1/namespaces/<namespace>/deployments/<name>"},
{"method", "GET"},
{"namespace", "system"},
{"status", "500"},
{"prometheus", "prometheus-core-1"},
{"datacenter", "eu-west-1"},
{"pod_name", "abcdef-99999-defee"},
}
var l Labels
builder := NewBuilder(EmptyLabels())
for i := 0; i < b.N; i++ {
builder.Reset(EmptyLabels())
for _, l := range m {
builder.Set(l.Name, l.Value)
}
l = builder.Labels(EmptyLabels())
}
require.Equal(b, 9, l.Len())
}
func BenchmarkLabels_Copy(b *testing.B) {
m := map[string]string{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
l := FromMap(m)
for i := 0; i < b.N; i++ {
l = l.Copy()
}
}
func TestMarshaling(t *testing.T) {
lbls := FromStrings("aaa", "111", "bbb", "2222", "ccc", "33333")
expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}"

View file

@ -15,6 +15,7 @@ package relabel
import (
"crypto/md5"
"encoding/binary"
"fmt"
"strings"
@ -268,7 +269,9 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.La
case Uppercase:
lb.Set(cfg.TargetLabel, strings.ToUpper(val))
case HashMod:
mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
hash := md5.Sum([]byte(val))
// Use only the last 8 bytes of the hash to give the same result as earlier versions of this code.
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus
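// (Equivalent to the removed sum64 helper: its shifts for hash[0:8] were
// 64 bits or more and therefore produced zero, so only the big-endian
// value of hash[8:16] ever contributed.)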
lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
case LabelMap:
lset.Range(func(l labels.Label) {
@ -295,15 +298,3 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.La
return lb.Labels(lset), true
}
// sum64 sums the md5 hash to an uint64.
func sum64(hash [md5.Size]byte) uint64 {
var s uint64
for i, b := range hash {
shift := uint64((md5.Size - i - 1) * 8)
s |= uint64(b) << shift
}
return s
}

View file

@ -17,7 +17,6 @@
package textparse
import (
"bytes"
"errors"
"fmt"
"io"
@ -31,8 +30,6 @@ import (
"github.com/prometheus/prometheus/model/value"
)
var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")}
type openMetricsLexer struct {
b []byte
i int
@ -46,13 +43,6 @@ func (l *openMetricsLexer) buf() []byte {
return l.b[l.start:l.i]
}
func (l *openMetricsLexer) cur() byte {
if l.i < len(l.b) {
return l.b[l.i]
}
return byte(' ')
}
// next advances the openMetricsLexer to the next character.
func (l *openMetricsLexer) next() byte {
l.i++
@ -223,6 +213,14 @@ func (p *OpenMetricsParser) nextToken() token {
return tok
}
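// parseError builds an error containing the expectation, the offending
// token and bytes, and the input context; the end index is clamped so the
// slice never runs past the end of the buffer.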
func (p *OpenMetricsParser) parseError(exp string, got token) error {
e := p.l.i + 1
if len(p.l.b) < e {
e = len(p.l.b)
}
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}
// Next advances the parser to the next sample. It returns false if no
// more samples were read or an error occurred.
func (p *OpenMetricsParser) Next() (Entry, error) {
@ -248,7 +246,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
case tMName:
p.offsets = append(p.offsets, p.l.start, p.l.i)
default:
return EntryInvalid, parseError("expected metric name after "+t.String(), t2)
return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2)
}
switch t2 := p.nextToken(); t2 {
case tText:
@ -284,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
}
case tHelp:
if !utf8.Valid(p.text) {
return EntryInvalid, errors.New("help text is not a valid utf8 string")
return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text)
}
}
switch t {
@ -297,7 +295,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
u := yoloString(p.text)
if len(u) > 0 {
if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m)
return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m)
}
}
return EntryUnit, nil
@ -336,10 +334,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return EntryInvalid, err
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return EntryInvalid, errors.New("invalid timestamp")
return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
}
p.ts = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
@ -349,26 +347,20 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
return EntryInvalid, err
}
default:
return EntryInvalid, parseError("expected next entry after timestamp", t3)
return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
}
default:
return EntryInvalid, parseError("expected timestamp or # symbol", t2)
return EntryInvalid, p.parseError("expected timestamp or # symbol", t2)
}
return EntrySeries, nil
default:
err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
err = p.parseError("expected a valid start token", t)
}
return EntryInvalid, err
}
func (p *OpenMetricsParser) parseComment() error {
// Validate the name of the metric. It must have _total or _bucket as
// suffix for exemplars to be supported.
if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil {
return err
}
var err error
// Parse the labels.
p.eOffsets, err = p.parseLVals(p.eOffsets)
@ -395,19 +387,19 @@ func (p *OpenMetricsParser) parseComment() error {
var ts float64
// A float is enough to hold what we need for millisecond resolution.
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
return err
return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if math.IsNaN(ts) || math.IsInf(ts, 0) {
return errors.New("invalid exemplar timestamp")
return fmt.Errorf("invalid exemplar timestamp %f", ts)
}
p.exemplarTs = int64(ts * 1000)
switch t3 := p.nextToken(); t3 {
case tLinebreak:
default:
return parseError("expected next entry after exemplar timestamp", t3)
return p.parseError("expected next entry after exemplar timestamp", t3)
}
default:
return parseError("expected timestamp or comment", t2)
return p.parseError("expected timestamp or comment", t2)
}
return nil
}
@ -421,21 +413,21 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
return offsets, nil
case tComma:
if first {
return nil, parseError("expected label name or left brace", t)
return nil, p.parseError("expected label name or left brace", t)
}
t = p.nextToken()
if t != tLName {
return nil, parseError("expected label name", t)
return nil, p.parseError("expected label name", t)
}
case tLName:
if !first {
return nil, parseError("expected comma", t)
return nil, p.parseError("expected comma", t)
}
default:
if first {
return nil, parseError("expected label name or left brace", t)
return nil, p.parseError("expected label name or left brace", t)
}
return nil, parseError("expected comma or left brace", t)
return nil, p.parseError("expected comma or left brace", t)
}
first = false
@ -444,13 +436,13 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
offsets = append(offsets, p.l.start, p.l.i)
if t := p.nextToken(); t != tEqual {
return nil, parseError("expected equal", t)
return nil, p.parseError("expected equal", t)
}
if t := p.nextToken(); t != tLValue {
return nil, parseError("expected label value", t)
return nil, p.parseError("expected label value", t)
}
if !utf8.Valid(p.l.buf()) {
return nil, errors.New("invalid UTF-8 label value")
return nil, fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf())
}
// The openMetricsLexer ensures the value string is quoted. Strip first
@ -461,11 +453,11 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
if t != tValue {
return 0, parseError(fmt.Sprintf("expected value after %v", after), t)
return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t)
}
val, err := parseFloat(yoloString(p.l.buf()[1:]))
if err != nil {
return 0, err
return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.exemplarVal) {
@ -473,12 +465,3 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error
}
return val, nil
}
func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error {
for _, suffix := range allowedSuffixes {
if bytes.HasSuffix(name, suffix) {
return nil
}
}
return fmt.Errorf("metric name %v does not support exemplars", string(name))
}

View file

@ -45,9 +45,14 @@ hh_bucket{le="+Inf"} 1
# TYPE gh gaugehistogram
gh_bucket{le="+Inf"} 1
# TYPE hhh histogram
hhh_bucket{le="+Inf"} 1 # {aa="bb"} 4
hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4
hhh_count 1 # {id="histogram-count-test"} 4
# TYPE ggh gaugehistogram
ggh_bucket{le="+Inf"} 1 # {cc="dd",xx="yy"} 4 123.123
ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123
ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123
# TYPE smr_seconds summary
smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321
smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321
# TYPE ii info
ii{foo="bar"} 1
# TYPE ss stateset
@ -59,7 +64,7 @@ _metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1
# TYPE foo counter
foo_total 17.0 1520879607.789 # {xx="yy"} 5`
foo_total 17.0 1520879607.789 # {id="counter-test"} 5`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
@ -152,7 +157,12 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
m: `hhh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("aa", "bb"), Value: 4},
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4},
}, {
m: `hhh_count`,
v: 1,
lset: labels.FromStrings("__name__", "hhh_count"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4},
}, {
m: "ggh",
typ: MetricTypeGaugeHistogram,
@ -160,7 +170,25 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
m: `ggh_bucket{le="+Inf"}`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("cc", "dd", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
}, {
m: `ggh_count`,
v: 1,
lset: labels.FromStrings("__name__", "ggh_count"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
}, {
m: "smr_seconds",
typ: MetricTypeSummary,
}, {
m: `smr_seconds_count`,
v: 2,
lset: labels.FromStrings("__name__", "smr_seconds_count"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321},
}, {
m: `smr_seconds_sum`,
v: 42,
lset: labels.FromStrings("__name__", "smr_seconds_sum"),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321},
}, {
m: "ii",
typ: MetricTypeInfo,
@ -206,7 +234,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`
v: 17,
lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("xx", "yy"), Value: 5},
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
}, {
m: "metric",
help: "foo\x00bar",
@ -293,11 +321,11 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "\n",
err: "\"INVALID\" \"\\n\" is not a valid start token",
err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"",
},
{
input: "metric",
err: "expected value after metric, got \"EOF\"",
err: "expected value after metric, got \"metric\" (\"EOF\") while parsing: \"metric\"",
},
{
input: "metric 1",
@ -313,19 +341,19 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "a\n#EOF\n",
err: "expected value after metric, got \"INVALID\"",
err: "expected value after metric, got \"\\n\" (\"INVALID\") while parsing: \"a\\n\"",
},
{
input: "\n\n#EOF\n",
err: "\"INVALID\" \"\\n\" is not a valid start token",
err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"",
},
{
input: " a 1\n#EOF\n",
err: "\"INVALID\" \" \" is not a valid start token",
err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"",
},
{
input: "9\n#EOF\n",
err: "\"INVALID\" \"9\" is not a valid start token",
err: "expected a valid start token, got \"9\" (\"INVALID\") while parsing: \"9\"",
},
{
input: "# TYPE u untyped\n#EOF\n",
@ -337,11 +365,11 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "# TYPE c counter\n#EOF\n",
err: "\"INVALID\" \" \" is not a valid start token",
err: "expected a valid start token, got \"# \" (\"INVALID\") while parsing: \"# \"",
},
{
input: "# TYPE \n#EOF\n",
err: "expected metric name after TYPE, got \"INVALID\"",
err: "expected metric name after TYPE, got \"\\n\" (\"INVALID\") while parsing: \"# TYPE \\n\"",
},
{
input: "# TYPE m\n#EOF\n",
@ -349,19 +377,19 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "# UNIT metric suffix\n#EOF\n",
err: "unit not a suffix of metric \"metric\"",
err: "unit \"suffix\" not a suffix of metric \"metric\"",
},
{
input: "# UNIT metricsuffix suffix\n#EOF\n",
err: "unit not a suffix of metric \"metricsuffix\"",
err: "unit \"suffix\" not a suffix of metric \"metricsuffix\"",
},
{
input: "# UNIT m suffix\n#EOF\n",
err: "unit not a suffix of metric \"m\"",
err: "unit \"suffix\" not a suffix of metric \"m\"",
},
{
input: "# UNIT \n#EOF\n",
err: "expected metric name after UNIT, got \"INVALID\"",
err: "expected metric name after UNIT, got \"\\n\" (\"INVALID\") while parsing: \"# UNIT \\n\"",
},
{
input: "# UNIT m\n#EOF\n",
@ -369,7 +397,7 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "# HELP \n#EOF\n",
err: "expected metric name after HELP, got \"INVALID\"",
err: "expected metric name after HELP, got \"\\n\" (\"INVALID\") while parsing: \"# HELP \\n\"",
},
{
input: "# HELP m\n#EOF\n",
@ -377,27 +405,27 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "a\t1\n#EOF\n",
err: "expected value after metric, got \"INVALID\"",
err: "expected value after metric, got \"\\t\" (\"INVALID\") while parsing: \"a\\t\"",
},
{
input: "a 1\t2\n#EOF\n",
err: "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax",
err: "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax while parsing: \"a 1\\t2\"",
},
{
input: "a 1 2 \n#EOF\n",
err: "expected next entry after timestamp, got \"INVALID\"",
err: "expected next entry after timestamp, got \" \\n\" (\"INVALID\") while parsing: \"a 1 2 \\n\"",
},
{
input: "a 1 2 #\n#EOF\n",
err: "expected next entry after timestamp, got \"TIMESTAMP\"",
err: "expected next entry after timestamp, got \" #\\n\" (\"TIMESTAMP\") while parsing: \"a 1 2 #\\n\"",
},
{
input: "a 1 1z\n#EOF\n",
err: "strconv.ParseFloat: parsing \"1z\": invalid syntax",
err: "strconv.ParseFloat: parsing \"1z\": invalid syntax while parsing: \"a 1 1z\"",
},
{
input: " # EOF\n",
err: "\"INVALID\" \" \" is not a valid start token",
err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"",
},
{
input: "# EOF\na 1",
@ -413,7 +441,7 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "#\tTYPE c counter\n",
err: "\"INVALID\" \"\\t\" is not a valid start token",
err: "expected a valid start token, got \"#\\t\" (\"INVALID\") while parsing: \"#\\t\"",
},
{
input: "# TYPE c counter\n",
@ -421,135 +449,131 @@ func TestOpenMetricsParseErrors(t *testing.T) {
},
{
input: "a 1 1 1\n# EOF\n",
err: "expected next entry after timestamp, got \"TIMESTAMP\"",
err: "expected next entry after timestamp, got \" 1\\n\" (\"TIMESTAMP\") while parsing: \"a 1 1 1\\n\"",
},
{
input: "a{b='c'} 1\n# EOF\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"",
},
{
input: "a{b=\"c\",} 1\n# EOF\n",
err: "expected label name, got \"BCLOSE\"",
err: "expected label name, got \"} \" (\"BCLOSE\") while parsing: \"a{b=\\\"c\\\",} \"",
},
{
input: "a{,b=\"c\"} 1\n# EOF\n",
err: "expected label name or left brace, got \"COMMA\"",
err: "expected label name or left brace, got \",b\" (\"COMMA\") while parsing: \"a{,b\"",
},
{
input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n",
err: "expected comma, got \"LNAME\"",
err: "expected comma, got \"d=\" (\"LNAME\") while parsing: \"a{b=\\\"c\\\"d=\"",
},
{
input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n",
err: "expected label name, got \"COMMA\"",
err: "expected label name, got \",d\" (\"COMMA\") while parsing: \"a{b=\\\"c\\\",,d\"",
},
{
input: "a{b=\n# EOF\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\n\" (\"INVALID\") while parsing: \"a{b=\\n\"",
},
{
input: "a{\xff=\"foo\"} 1\n# EOF\n",
err: "expected label name or left brace, got \"INVALID\"",
err: "expected label name or left brace, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"",
},
{
input: "a{b=\"\xff\"} 1\n# EOF\n",
err: "invalid UTF-8 label value",
err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"",
},
{
input: "a true\n",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"",
},
{
input: "something_weird{problem=\"\n# EOF\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\"\\n\" (\"INVALID\") while parsing: \"something_weird{problem=\\\"\\n\"",
},
{
input: "empty_label_name{=\"\"} 0\n# EOF\n",
err: "expected label name or left brace, got \"EQUAL\"",
err: "expected label name or left brace, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"",
},
{
input: "foo 1_2\n\n# EOF\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 1_2\"",
},
{
input: "foo 0x1p-3\n\n# EOF\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 0x1p-3\"",
},
{
input: "foo 0x1P-3\n\n# EOF\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 0x1P-3\"",
},
{
input: "foo 0 1_2\n\n# EOF\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 0 1_2\"",
},
{
input: "custom_metric_total 1 # {aa=bb}\n# EOF\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"b\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=b\"",
},
{
input: "custom_metric_total 1 # {aa=\"bb\"}\n# EOF\n",
err: "expected value after exemplar labels, got \"INVALID\"",
err: "expected value after exemplar labels, got \"\\n\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\\n\"",
},
{
input: `custom_metric_total 1 # {aa="bb"}`,
err: "expected value after exemplar labels, got \"EOF\"",
},
{
input: `custom_metric 1 # {aa="bb"}`,
err: "metric name custom_metric does not support exemplars",
err: "expected value after exemplar labels, got \"}\" (\"EOF\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\"",
},
{
input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`,
err: "expected label name, got \"COMMA\"",
err: "expected label name, got \",c\" (\"COMMA\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",,c\"",
},
{
input: `custom_metric_total 1 # {aa="bb"} 1_2`,
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} 1_2\"",
},
{
input: `custom_metric_total 1 # {aa="bb"} 0x1p-3`,
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} 0x1p-3\"",
},
{
input: `custom_metric_total 1 # {aa="bb"} true`,
err: "strconv.ParseFloat: parsing \"true\": invalid syntax",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} true\"",
},
{
input: `custom_metric_total 1 # {aa="bb",cc=}`,
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"}\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",cc=}\"",
},
{
input: `custom_metric_total 1 # {aa=\"\xff\"} 9.0`,
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\\\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\\\"",
},
{
input: `{b="c",} 1`,
err: `"INVALID" "{" is not a valid start token`,
err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"",
},
{
input: `a 1 NaN`,
err: `invalid timestamp`,
err: `invalid timestamp NaN`,
},
{
input: `a 1 -Inf`,
err: `invalid timestamp`,
err: `invalid timestamp -Inf`,
},
{
input: `a 1 Inf`,
err: `invalid timestamp`,
err: `invalid timestamp +Inf`,
},
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 NaN",
err: `invalid exemplar timestamp`,
err: `invalid exemplar timestamp NaN`,
},
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
err: `invalid exemplar timestamp`,
err: `invalid exemplar timestamp -Inf`,
},
{
input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
err: `invalid exemplar timestamp`,
err: `invalid exemplar timestamp +Inf`,
},
}
@ -586,35 +610,35 @@ func TestOMNullByteHandling(t *testing.T) {
},
{
input: "a{b=\x00\"ssss\"} 1\n# EOF\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"a{b=\\x00\"",
},
{
input: "a{b=\"\x00",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\"\\x00\" (\"INVALID\") while parsing: \"a{b=\\\"\\x00\"",
},
{
input: "a{b\x00=\"hiih\"} 1",
err: "expected equal, got \"INVALID\"",
err: "expected equal, got \"\\x00\" (\"INVALID\") while parsing: \"a{b\\x00\"",
},
{
input: "a\x00{b=\"ddd\"} 1",
err: "expected value after metric, got \"INVALID\"",
err: "expected value after metric, got \"\\x00\" (\"INVALID\") while parsing: \"a\\x00\"",
},
{
input: "#",
err: "\"INVALID\" \" \" is not a valid start token",
err: "expected a valid start token, got \"#\" (\"INVALID\") while parsing: \"#\"",
},
{
input: "# H",
err: "\"INVALID\" \" \" is not a valid start token",
err: "expected a valid start token, got \"# H\" (\"INVALID\") while parsing: \"# H\"",
},
{
input: "custom_metric_total 1 # {b=\x00\"ssss\"} 1\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {b=\\x00\"",
},
{
input: "custom_metric_total 1 # {b=\"\x00ss\"} 1\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\"\\x00\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {b=\\\"\\x00\"",
},
}

View file

@ -254,8 +254,12 @@ func (p *PromParser) nextToken() token {
}
}
func parseError(exp string, got token) error {
return fmt.Errorf("%s, got %q", exp, got)
func (p *PromParser) parseError(exp string, got token) error {
e := p.l.i + 1
if len(p.l.b) < e {
e = len(p.l.b)
}
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}
// Next advances the parser to the next sample. It returns false if no
@ -278,7 +282,7 @@ func (p *PromParser) Next() (Entry, error) {
case tMName:
p.offsets = append(p.offsets, p.l.start, p.l.i)
default:
return EntryInvalid, parseError("expected metric name after "+t.String(), t2)
return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2)
}
switch t2 := p.nextToken(); t2 {
case tText:
@ -308,11 +312,11 @@ func (p *PromParser) Next() (Entry, error) {
}
case tHelp:
if !utf8.Valid(p.text) {
return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string")
return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text)
}
}
if t := p.nextToken(); t != tLinebreak {
return EntryInvalid, parseError("linebreak expected after metadata", t)
return EntryInvalid, p.parseError("linebreak expected after metadata", t)
}
switch t {
case tHelp:
@ -323,7 +327,7 @@ func (p *PromParser) Next() (Entry, error) {
case tComment:
p.text = p.l.buf()
if t := p.nextToken(); t != tLinebreak {
return EntryInvalid, parseError("linebreak expected after comment", t)
return EntryInvalid, p.parseError("linebreak expected after comment", t)
}
return EntryComment, nil
@ -340,10 +344,10 @@ func (p *PromParser) Next() (Entry, error) {
t2 = p.nextToken()
}
if t2 != tValue {
return EntryInvalid, parseError("expected value after metric", t2)
return EntryInvalid, p.parseError("expected value after metric", t2)
}
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
return EntryInvalid, err
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
}
// Ensure canonical NaN value.
if math.IsNaN(p.val) {
@ -356,18 +360,18 @@ func (p *PromParser) Next() (Entry, error) {
case tTimestamp:
p.hasTS = true
if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
return EntryInvalid, err
return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i])
}
if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, parseError("expected next entry after timestamp", t2)
return EntryInvalid, p.parseError("expected next entry after timestamp", t2)
}
default:
return EntryInvalid, parseError("expected timestamp or new record", t)
return EntryInvalid, p.parseError("expected timestamp or new record", t)
}
return EntrySeries, nil
default:
err = fmt.Errorf("%q is not a valid start token", t)
err = p.parseError("expected a valid start token", t)
}
return EntryInvalid, err
}
@ -380,18 +384,18 @@ func (p *PromParser) parseLVals() error {
return nil
case tLName:
default:
return parseError("expected label name", t)
return p.parseError("expected label name", t)
}
p.offsets = append(p.offsets, p.l.start, p.l.i)
if t := p.nextToken(); t != tEqual {
return parseError("expected equal", t)
return p.parseError("expected equal", t)
}
if t := p.nextToken(); t != tLValue {
return parseError("expected label value", t)
return p.parseError("expected label value", t)
}
if !utf8.Valid(p.l.buf()) {
return fmt.Errorf("invalid UTF-8 label value")
return fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf())
}
// The promlexer ensures the value string is quoted. Strip first

View file

@ -219,63 +219,63 @@ func TestPromParseErrors(t *testing.T) {
}{
{
input: "a",
err: "expected value after metric, got \"INVALID\"",
err: "expected value after metric, got \"\\n\" (\"INVALID\") while parsing: \"a\\n\"",
},
{
input: "a{b='c'} 1\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"",
},
{
input: "a{b=\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\n\" (\"INVALID\") while parsing: \"a{b=\\n\"",
},
{
input: "a{\xff=\"foo\"} 1\n",
err: "expected label name, got \"INVALID\"",
err: "expected label name, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"",
},
{
input: "a{b=\"\xff\"} 1\n",
err: "invalid UTF-8 label value",
err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"",
},
{
input: "a true\n",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax",
err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"",
},
{
input: "something_weird{problem=\"",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\"\\n\" (\"INVALID\") while parsing: \"something_weird{problem=\\\"\\n\"",
},
{
input: "empty_label_name{=\"\"} 0",
err: "expected label name, got \"EQUAL\"",
err: "expected label name, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"",
},
{
input: "foo 1_2\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 1_2\"",
},
{
input: "foo 0x1p-3\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 0x1p-3\"",
},
{
input: "foo 0x1P-3\n",
err: "unsupported character in float",
err: "unsupported character in float while parsing: \"foo 0x1P-3\"",
},
{
input: "foo 0 1_2\n",
err: "expected next entry after timestamp, got \"INVALID\"",
err: "expected next entry after timestamp, got \"_\" (\"INVALID\") while parsing: \"foo 0 1_\"",
},
{
input: `{a="ok"} 1`,
err: `"INVALID" is not a valid start token`,
err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"",
},
{
input: "# TYPE #\n#EOF\n",
err: "expected metric name after TYPE, got \"INVALID\"",
err: "expected metric name after TYPE, got \"#\" (\"INVALID\") while parsing: \"# TYPE #\"",
},
{
input: "# HELP #\n#EOF\n",
err: "expected metric name after HELP, got \"INVALID\"",
err: "expected metric name after HELP, got \"#\" (\"INVALID\") while parsing: \"# HELP #\"",
},
}
@ -313,23 +313,23 @@ func TestPromNullByteHandling(t *testing.T) {
},
{
input: "a{b=\x00\"ssss\"} 1\n",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"a{b=\\x00\"",
},
{
input: "a{b=\"\x00",
err: "expected label value, got \"INVALID\"",
err: "expected label value, got \"\\\"\\x00\\n\" (\"INVALID\") while parsing: \"a{b=\\\"\\x00\\n\"",
},
{
input: "a{b\x00=\"hiih\"} 1",
err: "expected equal, got \"INVALID\"",
err: "expected equal, got \"\\x00\" (\"INVALID\") while parsing: \"a{b\\x00\"",
},
{
input: "a\x00{b=\"ddd\"} 1",
err: "expected value after metric, got \"INVALID\"",
err: "expected value after metric, got \"\\x00\" (\"INVALID\") while parsing: \"a\\x00\"",
},
{
input: "a 0 1\x00",
err: "expected next entry after timestamp, got \"INVALID\"",
err: "expected next entry after timestamp, got \"\\x00\" (\"INVALID\") while parsing: \"a 0 1\\x00\"",
},
}

View file

@ -2029,7 +2029,7 @@ var testExpr = []struct {
{
input: `foo[5y1hs]`,
fail: true,
errMsg: "not a valid duration string: \"5y1hs\"",
errMsg: "unknown unit \"hs\" in duration \"5y1hs\"",
},
{
input: `foo[5m1h]`,

View file

@ -82,7 +82,7 @@ func (s Series) String() string {
func (s Series) MarshalJSON() ([]byte, error) {
// Note that this is rather inefficient because it re-creates the whole
// series, just separated by Histogram Points and Value Points. For API
// purposes, there is a more efficcient jsoniter implementation in
// purposes, there is a more efficient jsoniter implementation in
// web/api/v1/api.go.
series := struct {
M labels.Labels `json:"metric"`

View file

@ -919,8 +919,8 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
time.Second,
time.Minute,
lbs,
nil,
nil,
labels.EmptyLabels(),
labels.EmptyLabels(),
"",
true, log.NewNopLogger(),
)

View file

@ -29,7 +29,7 @@ import (
type unknownRule struct{}
func (u unknownRule) Name() string { return "" }
func (u unknownRule) Labels() labels.Labels { return nil }
func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) {
return nil, nil
}

View file

@ -79,19 +79,21 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu
if err != nil {
return nil, err
}
// Override the metric name and labels.
lb := labels.NewBuilder(labels.EmptyLabels())
for i := range vector {
sample := &vector[i]
lb := labels.NewBuilder(sample.Metric)
lb.Reset(sample.Metric)
lb.Set(labels.MetricName, rule.name)
rule.labels.Range(func(l labels.Label) {
lb.Set(l.Name, l.Value)
})
sample.Metric = lb.Labels(labels.EmptyLabels())
sample.Metric = lb.Labels(sample.Metric)
}
// Check that the rule does not produce identical metrics after applying
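
Hoisting the builder out of the loop and calling Reset per sample avoids re-allocating builder state for every series in the vector. A toy illustration of the reuse pattern (a hypothetical map-backed builder, not the real labels.Builder):

```go
package main

import (
	"fmt"
	"sort"
)

// builder is a toy stand-in for labels.Builder: allocate once,
// Reset per iteration, mutate, then materialize.
type builder struct{ m map[string]string }

func newBuilder() *builder { return &builder{m: map[string]string{}} }

func (b *builder) Reset(base map[string]string) {
	for k := range b.m {
		delete(b.m, k)
	}
	for k, v := range base {
		b.m[k] = v
	}
}

func (b *builder) Set(k, v string) { b.m[k] = v }

func (b *builder) Labels() []string {
	out := make([]string, 0, len(b.m))
	for k, v := range b.m {
		out = append(out, k+"="+v)
	}
	sort.Strings(out)
	return out
}

func main() {
	samples := []map[string]string{
		{"__name__": "metric", "label_a": "1"},
		{"__name__": "metric", "label_a": "2"},
	}
	lb := newBuilder() // hoisted out of the loop, as in the diff
	for _, s := range samples {
		lb.Reset(s)
		lb.Set("__name__", "test_rule") // override the metric name
		fmt.Println(lb.Labels())
	}
}
```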

View file

@ -27,59 +27,126 @@ import (
"github.com/prometheus/prometheus/util/teststorage"
)
var (
ruleEvaluationTime = time.Unix(0, 0).UTC()
exprWithMetricName, _ = parser.ParseExpr(`sort(metric)`)
exprWithoutMetricName, _ = parser.ParseExpr(`sort(metric + metric)`)
)
var ruleEvalTestScenarios = []struct {
name string
ruleLabels labels.Labels
expr parser.Expr
expected promql.Vector
}{
{
name: "no labels in recording rule, metric name in query result",
ruleLabels: labels.EmptyLabels(),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"),
Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)},
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"),
Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)},
},
},
},
{
name: "only new labels in recording rule, metric name in query result",
ruleLabels: labels.FromStrings("extra_from_rule", "foo"),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3", "extra_from_rule", "foo"),
Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)},
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4", "extra_from_rule", "foo"),
Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)},
},
},
},
{
name: "some replacement labels in recording rule, metric name in query result",
ruleLabels: labels.FromStrings("label_a", "from_rule"),
expr: exprWithMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "3"),
Point: promql.Point{V: 1, T: timestamp.FromTime(ruleEvaluationTime)},
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "from_rule", "label_b", "4"),
Point: promql.Point{V: 10, T: timestamp.FromTime(ruleEvaluationTime)},
},
},
},
{
name: "no labels in recording rule, no metric name in query result",
ruleLabels: labels.EmptyLabels(),
expr: exprWithoutMetricName,
expected: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "1", "label_b", "3"),
Point: promql.Point{V: 2, T: timestamp.FromTime(ruleEvaluationTime)},
},
promql.Sample{
Metric: labels.FromStrings("__name__", "test_rule", "label_a", "2", "label_b", "4"),
Point: promql.Point{V: 20, T: timestamp.FromTime(ruleEvaluationTime)},
},
},
},
}
func setUpRuleEvalTest(t require.TestingT) *promql.Test {
suite, err := promql.NewTest(t, `
load 1m
metric{label_a="1",label_b="3"} 1
metric{label_a="2",label_b="4"} 10
`)
require.NoError(t, err)
return suite
}
func TestRuleEval(t *testing.T) {
storage := teststorage.New(t)
defer storage.Close()
suite := setUpRuleEvalTest(t)
defer suite.Close()
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
require.NoError(t, suite.Run())
engine := promql.NewEngine(opts)
ctx, cancelCtx := context.WithCancel(context.Background())
defer cancelCtx()
now := time.Now()
suite := []struct {
name string
expr parser.Expr
labels labels.Labels
result promql.Vector
err string
}{
{
name: "nolabels",
expr: &parser.NumberLiteral{Val: 1},
labels: labels.Labels{},
result: promql.Vector{promql.Sample{
Metric: labels.FromStrings("__name__", "nolabels"),
Point: promql.Point{V: 1, T: timestamp.FromTime(now)},
}},
},
{
name: "labels",
expr: &parser.NumberLiteral{Val: 1},
labels: labels.FromStrings("foo", "bar"),
result: promql.Vector{promql.Sample{
Metric: labels.FromStrings("__name__", "labels", "foo", "bar"),
Point: promql.Point{V: 1, T: timestamp.FromTime(now)},
}},
},
}
for _, test := range suite {
rule := NewRecordingRule(test.name, test.expr, test.labels)
result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0)
if test.err == "" {
for _, scenario := range ruleEvalTestScenarios {
t.Run(scenario.name, func(t *testing.T) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
result, err := rule.Eval(suite.Context(), ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
require.NoError(t, err)
} else {
require.Equal(t, test.err, err.Error())
}
require.Equal(t, test.result, result)
require.Equal(t, scenario.expected, result)
})
}
}
func BenchmarkRuleEval(b *testing.B) {
suite := setUpRuleEvalTest(b)
defer suite.Close()
require.NoError(b, suite.Run())
for _, scenario := range ruleEvalTestScenarios {
b.Run(scenario.name, func(b *testing.B) {
rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := rule.Eval(suite.Context(), ruleEvaluationTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
if err != nil {
require.NoError(b, err)
}
}
})
}
}

View file

@ -270,8 +270,13 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
m.mtxScrape.Lock()
defer m.mtxScrape.Unlock()
scfgs, err := cfg.GetScrapeConfigs()
if err != nil {
return err
}
c := make(map[string]*config.ScrapeConfig)
for _, scfg := range cfg.ScrapeConfigs {
for _, scfg := range scfgs {
c[scfg.JobName] = scfg
}
m.scrapeConfigs = c

View file

@ -263,7 +263,7 @@ func TestPopulateLabels(t *testing.T) {
},
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "error parsing scrape interval: not a valid duration string: \"2notseconds\"",
err: "error parsing scrape interval: unknown unit \"notseconds\" in duration \"2notseconds\"",
},
// Invalid duration in timeout label.
{
@ -280,7 +280,7 @@ func TestPopulateLabels(t *testing.T) {
},
res: labels.EmptyLabels(),
resOrig: labels.EmptyLabels(),
err: "error parsing scrape timeout: not a valid duration string: \"2notseconds\"",
err: "error parsing scrape timeout: unknown unit \"notseconds\" in duration \"2notseconds\"",
},
// 0 interval in timeout label.
{

View file

@ -328,7 +328,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
options.PassMetadataInContext,
)
}
targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
return sp, nil
}

View file

@ -2735,7 +2735,7 @@ func TestReuseScrapeCache(t *testing.T) {
HonorTimestamps: true,
SampleLimit: 400,
HTTPClientConfig: config_util.HTTPClientConfig{
ProxyURL: config_util.URL{URL: proxyURL},
ProxyConfig: config_util.ProxyConfig{ProxyURL: config_util.URL{URL: proxyURL}},
},
ScrapeInterval: model.Duration(5 * time.Second),
ScrapeTimeout: model.Duration(15 * time.Second),

View file

@ -75,12 +75,17 @@ function bumpVersion() {
if [[ "${version}" == v* ]]; then
version="${version:1}"
fi
# increase the version on all packages
npm version "${version}" --workspaces
# upgrade the @prometheus-io/* dependencies on all packages
for workspace in ${workspaces}; do
sed -E -i "s|(\"@prometheus-io/.+\": )\".+\"|\1\"\^${version}\"|" "${workspace}"/package.json
# sed -i syntax is different on mac and linux
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -E -i "" "s|(\"@prometheus-io/.+\": )\".+\"|\1\"${version}\"|" "${workspace}"/package.json
else
sed -E -i "s|(\"@prometheus-io/.+\": )\".+\"|\1\"${version}\"|" "${workspace}"/package.json
fi
done
# increase the version on all packages
npm version "${version}" --workspaces
}
if [[ "$1" == "--copy" ]]; then

View file

@ -109,10 +109,11 @@ func TestClientRetryAfter(t *testing.T) {
RetryOnRateLimit: false,
}
var recErr RecoverableError
c := getClient(conf)
err = c.Store(context.Background(), []byte{})
_, ok := err.(RecoverableError)
require.False(t, ok, "Recoverable error not expected.")
require.False(t, errors.As(err, &recErr), "Recoverable error not expected.")
conf = &ClientConfig{
URL: &config_util.URL{URL: serverURL},
@ -122,8 +123,7 @@ func TestClientRetryAfter(t *testing.T) {
c = getClient(conf)
err = c.Store(context.Background(), []byte{})
_, ok = err.(RecoverableError)
require.True(t, ok, "Recoverable error was expected.")
require.True(t, errors.As(err, &recErr), "Recoverable error was expected.")
}
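
The switch from a direct type assertion to errors.As matters once errors are wrapped with %w: the assertion only matches the outermost value, while errors.As walks the wrap chain. A small self-contained illustration (the struct merely mirrors the shape of the client's RecoverableError):

```go
package main

import (
	"errors"
	"fmt"
)

// RecoverableError mirrors the shape used in the remote-write client:
// a concrete error type callers probe for to decide whether to retry.
type RecoverableError struct{ error }

func main() {
	base := RecoverableError{errors.New("server returned 5xx")}
	wrapped := fmt.Errorf("sending batch: %w", base)

	// The old pattern: a type assertion fails on the wrapped error.
	_, ok := wrapped.(RecoverableError)
	fmt.Println("type assertion matched:", ok) // false

	// The new pattern: errors.As unwraps until it finds a match.
	var recErr RecoverableError
	fmt.Println("errors.As matched:", errors.As(wrapped, &recErr)) // true
}
```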
func TestRetryAfterDuration(t *testing.T) {

View file

@ -894,6 +894,10 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) {
// processExternalLabels merges externalLabels into ls. If ls contains
// a label in externalLabels, the value in ls wins.
func processExternalLabels(ls labels.Labels, externalLabels []labels.Label) labels.Labels {
if len(externalLabels) == 0 {
return ls
}
b := labels.NewScratchBuilder(ls.Len() + len(externalLabels))
j := 0
ls.Range(func(l labels.Label) {
@ -1565,8 +1569,8 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
}
// If the error is unrecoverable, we should not retry.
backoffErr, ok := err.(RecoverableError)
if !ok {
var backoffErr RecoverableError
if !errors.As(err, &backoffErr) {
return err
}

View file

@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/credentials"

View file

@ -125,6 +125,10 @@ type Options struct {
// WALCompression will turn on Snappy compression for records on the WAL.
WALCompression bool
// Maximum number of CPUs that can simultaneously process WAL replay.
// If it is <=0, then GOMAXPROCS is used.
WALReplayConcurrency int
// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
StripeSize int
@ -782,6 +786,9 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
if opts.WALReplayConcurrency > 0 {
headOpts.WALReplayConcurrency = opts.WALReplayConcurrency
}
if opts.IsolationDisabled {
// We only override this flag if isolation is disabled at DB level. We use the default otherwise.
headOpts.IsolationDisabled = opts.IsolationDisabled
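A hedged sketch of the option's semantics, mirroring the guard above in a self-contained form (the real Options struct lives in github.com/prometheus/prometheus/tsdb and has many more fields):

```go
package main

import (
	"fmt"
	"runtime"
)

// Options mirrors only the field relevant here.
type Options struct {
	WALReplayConcurrency int
}

// effectiveConcurrency mirrors the guard in the diff:
// non-positive means "use GOMAXPROCS".
func effectiveConcurrency(o Options) int {
	if o.WALReplayConcurrency > 0 {
		return o.WALReplayConcurrency
	}
	return runtime.GOMAXPROCS(0)
}

func main() {
	fmt.Println(effectiveConcurrency(Options{}))                        // defaults to GOMAXPROCS
	fmt.Println(effectiveConcurrency(Options{WALReplayConcurrency: 2})) // capped at 2
}
```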

View file

@ -18,6 +18,7 @@ import (
"io"
"math"
"path/filepath"
"runtime"
"sync"
"time"
@ -58,6 +59,8 @@ var (
// defaultIsolationDisabled is true if isolation is disabled by default.
defaultIsolationDisabled = false
defaultWALReplayConcurrency = runtime.GOMAXPROCS(0)
)
// Head handles reads and writes of time series data within a time window.
@ -155,6 +158,11 @@ type HeadOptions struct {
EnableMemorySnapshotOnShutdown bool
IsolationDisabled bool
// Maximum number of CPUs that can simultaneously process WAL replay.
// The default value is GOMAXPROCS.
// If it is set to a negative value or zero, the default value is used.
WALReplayConcurrency int
}
const (
@ -172,6 +180,7 @@ func DefaultHeadOptions() *HeadOptions {
StripeSize: DefaultStripeSize,
SeriesCallback: &noopSeriesLifecycleCallback{},
IsolationDisabled: defaultIsolationDisabled,
WALReplayConcurrency: defaultWALReplayConcurrency,
}
ho.OutOfOrderCapMax.Store(DefaultOutOfOrderCapMax)
return ho
@ -247,6 +256,10 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea
opts.ChunkPool = chunkenc.NewPool()
}
if opts.WALReplayConcurrency <= 0 {
opts.WALReplayConcurrency = defaultWALReplayConcurrency
}
h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
r,
mmappedChunksDir(opts.ChunkDirRoot),
@ -502,6 +515,17 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
}, func() float64 {
return float64(h.iso.lastAppendID())
}),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_chunks_storage_size_bytes",
Help: "Size of the chunks_head directory.",
}, func() float64 {
val, err := h.chunkDiskMapper.Size()
if err != nil {
level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir",
"err", err.Error())
}
return float64(val)
}),
)
}
return m
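
prometheus.NewGaugeFunc computes its value at scrape time, which is why the directory size is never cached anywhere. A minimal standalone version of the same pattern (the directory path and metric name are illustrative; dirSize approximates what the chunk disk mapper's Size() does):

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// dirSize walks dir and sums regular-file sizes.
func dirSize(dir string) float64 {
	var total int64
	filepath.WalkDir(dir, func(_ string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return nil // skip unreadable entries in this sketch
		}
		if info, err := d.Info(); err == nil {
			total += info.Size()
		}
		return nil
	})
	return float64(total)
}

func main() {
	g := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "example_dir_size_bytes",
		Help: "Size of a directory, computed at scrape time.",
	}, func() float64 { return dirSize(".") })

	reg := prometheus.NewRegistry()
	reg.MustRegister(g)
	fmt.Println(testutil.ToFloat64(g) >= 0) // true; value recomputed per collection
}
```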
@ -568,20 +592,47 @@ func (h *Head) Init(minValidTime int64) error {
if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
var err error
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err != nil {
snapIdx, snapOffset = -1, 0
refSeries = make(map[chunks.HeadSeriesRef]*memSeries)
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
// than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed
// to be outdated.
loadSnapshot := true
if h.wal != nil {
_, endAt, err := wlog.Segments(h.wal.Dir())
if err != nil {
return errors.Wrap(err, "finding WAL segments")
}
h.metrics.snapshotReplayErrorTotal.Inc()
level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err)
// We clear the partially loaded data to replay fresh from the WAL.
if err := h.resetInMemoryState(); err != nil {
return err
_, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot)
if err != nil && err != record.ErrNotFound {
level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err)
}
if err == nil && endAt < idx {
loadSnapshot = false
level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots")
if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil {
level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err)
}
}
}
if loadSnapshot {
var err error
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil {
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
}
if err != nil {
snapIdx, snapOffset = -1, 0
refSeries = make(map[chunks.HeadSeriesRef]*memSeries)
h.metrics.snapshotReplayErrorTotal.Inc()
level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err)
// We clear the partially loaded data to replay fresh from the WAL.
if err := h.resetInMemoryState(); err != nil {
return err
}
}
}
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
}
mmapChunkReplayStart := time.Now()
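The guard above compares the index of the newest WAL segment against the snapshot's index: a snapshot numbered past every WAL segment can only be left over from a deleted WAL, so it is removed rather than loaded. A simplified sketch of that decision (function and parameter names hypothetical):

```go
package main

import "fmt"

// shouldLoadSnapshot mirrors the check added in Init: a chunk snapshot is
// only trusted if at least one WAL segment has an index >= the snapshot's.
func shouldLoadSnapshot(lastWALSegment, snapshotIdx int, haveWAL bool) bool {
	if !haveWAL {
		return true // nothing to cross-check against
	}
	return lastWALSegment >= snapshotIdx
}

func main() {
	fmt.Println(shouldLoadSnapshot(3, 2, true)) // true: WAL is current
	fmt.Println(shouldLoadSnapshot(0, 2, true)) // false: snapshot outruns WAL, delete it
}
```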

View file

@ -367,7 +367,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta))
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
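The division by 1000 suggests a unit fix: TSDB timestamps are Unix milliseconds, so the delta between two samples is in ms, while the histogram's buckets appear to be defined in seconds (an assumption consistent with the change; check the metric's bucket definition to confirm). A tiny sketch of the conversion:

```go
package main

import "fmt"

// observeSeconds converts a millisecond delta to seconds before observing,
// as the diff does. The units here are an assumption, not taken from the diff.
func observeSeconds(observe func(float64), deltaMs int64) {
	observe(float64(deltaMs) / 1000)
}

func main() {
	observeSeconds(func(v float64) { fmt.Printf("%.3fs\n", v) }, 2500) // 2.500s
}
```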
if err != nil {
switch err {

View file

@ -304,12 +304,10 @@ func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
s.Unlock()
return &safeChunk{
Chunk: c.chunk,
s: s,
cid: cid,
isoState: h.isoState,
chunkDiskMapper: h.head.chunkDiskMapper,
memChunkPool: &h.head.memChunkPool,
Chunk: c.chunk,
s: s,
cid: cid,
isoState: h.isoState,
}, nil
}
@ -600,43 +598,24 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
// safeChunk makes sure that the chunk can be accessed without a race condition
type safeChunk struct {
chunkenc.Chunk
s *memSeries
cid chunks.HeadChunkID
isoState *isolationState
chunkDiskMapper *chunks.ChunkDiskMapper
memChunkPool *sync.Pool
s *memSeries
cid chunks.HeadChunkID
isoState *isolationState
}
func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
c.s.Lock()
it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, c.memChunkPool, reuseIter)
it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter)
c.s.Unlock()
return it
}
// iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range.
// It is unsafe to call this concurrently with s.append(...) without holding the series lock.
func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool, it chunkenc.Iterator) chunkenc.Iterator {
c, garbageCollect, err := s.chunk(id, chunkDiskMapper, memChunkPool)
// TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a
// series's chunk, which got then garbage collected before it got
// accessed. We must ensure to not garbage collect as long as any
// readers still hold a reference.
if err != nil {
return chunkenc.NewNopIterator()
}
defer func() {
if garbageCollect {
// Set this to nil so that Go GC can collect it after it has been used.
// This should be done always at the end.
c.chunk = nil
memChunkPool.Put(c)
}
}()
func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator {
ix := int(id) - int(s.firstChunkID)
numSamples := c.chunk.NumSamples()
numSamples := c.NumSamples()
stopAfter := numSamples
if isoState != nil && !isoState.IsolationDisabled() {
@ -681,9 +660,9 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch
return chunkenc.NewNopIterator()
}
if stopAfter == numSamples {
return c.chunk.Iterator(it)
return c.Iterator(it)
}
return makeStopIterator(c.chunk, it, stopAfter)
return makeStopIterator(c, it, stopAfter)
}
// stopIterator wraps an Iterator, but only returns the first

View file

@ -537,11 +537,18 @@ func TestHead_ReadWAL(t *testing.T) {
require.NoError(t, c.Err())
return x
}
require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
c, _, err := s10.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{100, 2, nil, nil}, {101, 5, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
c, _, err = s50.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{101, 6, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
// The samples before the new series record should be discarded since a duplicate record
// is only possible when old samples were compacted.
require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil, nil)))
c, _, err = s100.chunk(0, head.chunkDiskMapper, &head.memChunkPool)
require.NoError(t, err)
require.Equal(t, []sample{{101, 7, nil, nil}}, expandChunk(c.chunk.Iterator(nil)))
q, err := head.ExemplarQuerier(context.Background())
require.NoError(t, err)
@ -2566,7 +2573,13 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
require.True(t, ok, "sample append failed")
}
it := s.iterator(s.headChunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil, nil)
c, _, err := s.chunk(0, chunkDiskMapper, &sync.Pool{
New: func() interface{} {
return &memChunk{}
},
})
require.NoError(t, err)
it := c.chunk.Iterator(nil)
// First point.
require.Equal(t, chunkenc.ValFloat, it.Seek(0))
@ -4773,3 +4786,58 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) {
checkHeaders()
}
func TestSnapshotAheadOfWALError(t *testing.T) {
head, _ := newTestHead(t, 120*4, false, false)
head.opts.EnableMemorySnapshotOnShutdown = true
// Add a sample to fill WAL.
app := head.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 10, 10)
require.NoError(t, err)
require.NoError(t, app.Commit())
// Increment snapshot index to create sufficiently large difference.
for i := 0; i < 2; i++ {
_, err = head.wal.NextSegment()
require.NoError(t, err)
}
require.NoError(t, head.Close()) // This will create a snapshot.
_, idx, _, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
require.NoError(t, err)
require.Equal(t, 2, idx)
// Restart the WAL while keeping the old snapshot. The new head is created manually in this case in order
// to keep using the same snapshot directory instead of a random one.
require.NoError(t, os.RemoveAll(head.wal.Dir()))
head.opts.EnableMemorySnapshotOnShutdown = false
w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
// Add a sample to fill WAL.
app = head.Appender(context.Background())
_, err = app.Append(0, labels.FromStrings("foo", "bar"), 10, 10)
require.NoError(t, err)
require.NoError(t, app.Commit())
lastSegment, _, _ := w.LastSegmentAndOffset()
require.Equal(t, 0, lastSegment)
require.NoError(t, head.Close())
// New WAL is saved, but old snapshot still exists.
_, idx, _, err = LastChunkSnapshot(head.opts.ChunkDirRoot)
require.NoError(t, err)
require.Equal(t, 2, idx)
// Create new Head which should detect the incorrect index and delete the snapshot.
head.opts.EnableMemorySnapshotOnShutdown = true
w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, false)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(math.MinInt64))
// Verify that snapshot directory does not exist anymore.
_, _, _, err = LastChunkSnapshot(head.opts.ChunkDirRoot)
require.Equal(t, record.ErrNotFound, err)
require.NoError(t, head.Close())
}

View file

@ -18,7 +18,6 @@ import (
"math"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
@ -65,13 +64,13 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
n = runtime.GOMAXPROCS(0)
processors = make([]walSubsetProcessor, n)
concurrency = h.opts.WALReplayConcurrency
processors = make([]walSubsetProcessor, concurrency)
exemplarsInput chan record.RefExemplar
dec record.Decoder
shards = make([][]record.RefSample, n)
histogramShards = make([][]histogramRecord, n)
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decoded = make(chan interface{}, 10)
decodeErr, seriesCreationErr error
@ -116,7 +115,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
// For CorruptionErr ensure to terminate all workers before exiting.
_, ok := err.(*wlog.CorruptionErr)
if ok || seriesCreationErr != nil {
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
processors[i].closeAndDrain()
}
close(exemplarsInput)
@ -124,8 +123,8 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
}
}()
wg.Add(n)
for i := 0; i < n; i++ {
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
processors[i].setup()
go func(wp *walSubsetProcessor) {
@ -276,7 +275,7 @@ Outer:
multiRef[walSeries.Ref] = mSeries.ref
}
idx := uint64(mSeries.ref) % uint64(n)
idx := uint64(mSeries.ref) % uint64(concurrency)
processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries}
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
@ -293,7 +292,7 @@ Outer:
if len(samples) < m {
m = len(samples)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
@ -305,10 +304,10 @@ Outer:
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if len(shards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
@ -351,7 +350,7 @@ Outer:
if len(samples) < m {
m = len(samples)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@ -363,10 +362,10 @@ Outer:
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@ -388,7 +387,7 @@ Outer:
if len(samples) < m {
m = len(samples)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
@ -400,10 +399,10 @@ Outer:
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
if len(histogramShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
@ -444,7 +443,7 @@ Outer:
}
// Signal termination to each worker and wait for it to close its output channel.
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
processors[i].closeAndDrain()
}
close(exemplarsInput)
@ -685,12 +684,12 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
lastSeq, lastOff := lastMmapRef.Unpack()
// Start workers that each process samples for a partition of the series ID space.
var (
wg sync.WaitGroup
n = runtime.GOMAXPROCS(0)
processors = make([]wblSubsetProcessor, n)
wg sync.WaitGroup
concurrency = h.opts.WALReplayConcurrency
processors = make([]wblSubsetProcessor, concurrency)
dec record.Decoder
shards = make([][]record.RefSample, n)
shards = make([][]record.RefSample, concurrency)
decodedCh = make(chan interface{}, 10)
decodeErr error
@ -712,15 +711,15 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
_, ok := err.(*wlog.CorruptionErr)
if ok {
err = &errLoadWbl{err: err}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
processors[i].closeAndDrain()
}
wg.Wait()
}
}()
wg.Add(n)
for i := 0; i < n; i++ {
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
processors[i].setup()
go func(wp *wblSubsetProcessor) {
@ -779,17 +778,17 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
if len(samples) < m {
m = len(samples)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
shards[i] = processors[i].reuseBuf()
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(n)
mod := uint64(sam.Ref) % uint64(concurrency)
shards[mod] = append(shards[mod], sam)
}
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
processors[i].input <- shards[i]
}
samples = samples[m:]
@ -816,7 +815,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
mmapMarkerUnknownRefs.Inc()
continue
}
idx := uint64(ms.ref) % uint64(n)
idx := uint64(ms.ref) % uint64(concurrency)
// It is possible that some old sample is being processed in processWALSamples that
// could cause a race below. So we wait for the goroutine to empty the input buffer and finish
// processing all old samples after emptying the buffer.
@ -845,7 +844,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
}
// Signal termination to each worker and wait for it to close its output channel.
for i := 0; i < n; i++ {
for i := 0; i < concurrency; i++ {
processors[i].closeAndDrain()
}
wg.Wait()
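Throughout these loops, replay work is distributed by taking the series reference modulo the worker count, so all samples for a given series land on the same worker (and stay ordered) no matter how many workers run. A self-contained sketch of that sharding, assuming nothing beyond the modulo rule:

```go
package main

import (
	"fmt"
	"sync"
)

type sample struct {
	ref uint64
	t   int64
}

func main() {
	const concurrency = 4
	inputs := make([]chan sample, concurrency)
	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		inputs[i] = make(chan sample, 16)
		go func(worker int, in <-chan sample) {
			defer wg.Done()
			for s := range in {
				// All samples for a given ref arrive here, in order.
				fmt.Printf("worker %d: ref=%d t=%d\n", worker, s.ref, s.t)
			}
		}(i, inputs[i])
	}

	for _, s := range []sample{{ref: 10, t: 1}, {ref: 11, t: 1}, {ref: 10, t: 2}} {
		// The same rule the WAL replay uses: shard by ref mod worker count.
		inputs[s.ref%concurrency] <- s
	}
	for _, ch := range inputs {
		close(ch)
	}
	wg.Wait()
}
```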
@ -1381,18 +1380,18 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
var (
numSeries = 0
unknownRefs = int64(0)
n = runtime.GOMAXPROCS(0)
concurrency = h.opts.WALReplayConcurrency
wg sync.WaitGroup
recordChan = make(chan chunkSnapshotRecord, 5*n)
shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, n)
errChan = make(chan error, n)
recordChan = make(chan chunkSnapshotRecord, 5*concurrency)
shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, concurrency)
errChan = make(chan error, concurrency)
refSeries map[chunks.HeadSeriesRef]*memSeries
exemplarBuf []record.RefExemplar
dec record.Decoder
)
wg.Add(n)
for i := 0; i < n; i++ {
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
go func(idx int, rc <-chan chunkSnapshotRecord) {
defer wg.Done()
defer func() {

0 tsdb/test.txt Normal file
View file

View file

@ -1,58 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import "testing"
func BenchmarkMapConversion(b *testing.B) {
type key string
type val string
m := map[key]val{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
}
var sm map[string]string
for i := 0; i < b.N; i++ {
sm = make(map[string]string, len(m))
for k, v := range m {
sm[string(k)] = string(v)
}
}
}
func BenchmarkListIter(b *testing.B) {
var list []uint32
for i := 0; i < 1e4; i++ {
list = append(list, uint32(i))
}
b.ResetTimer()
total := uint32(0)
for j := 0; j < b.N; j++ {
sum := uint32(0)
for _, k := range list {
sum += k
}
total += sum
}
}

View file

@ -1,123 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"crypto/rand"
"fmt"
"hash/crc32"
"testing"
"github.com/cespare/xxhash/v2"
sip13 "github.com/dgryski/go-sip13"
)
type pair struct {
name, value string
}
var testInput = []pair{
{"job", "node"},
{"instance", "123.123.1.211:9090"},
{"path", "/api/v1/namespaces/<namespace>/deployments/<name>"},
{"method", "GET"},
{"namespace", "system"},
{"status", "500"},
}
func BenchmarkHash(b *testing.B) {
input := []byte{}
for _, v := range testInput {
input = append(input, v.name...)
input = append(input, '\xff')
input = append(input, v.value...)
input = append(input, '\xff')
}
var total uint64
var k0 uint64 = 0x0706050403020100
var k1 uint64 = 0x0f0e0d0c0b0a0908
for name, f := range map[string]func(b []byte) uint64{
"xxhash": xxhash.Sum64,
"fnv64": fnv64a,
"sip13": func(b []byte) uint64 { return sip13.Sum64(k0, k1, b) },
} {
b.Run(name, func(b *testing.B) {
b.SetBytes(int64(len(input)))
total = 0
for i := 0; i < b.N; i++ {
total += f(input)
}
})
}
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func fnv64a(b []byte) uint64 {
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
h := uint64(offset64)
for x := range b {
h ^= uint64(x)
h *= prime64
}
return h
}
func BenchmarkCRC32_diff(b *testing.B) {
data := [][]byte{}
for i := 0; i < 1000; i++ {
b := make([]byte, 512)
rand.Read(b)
data = append(data, b)
}
ctab := crc32.MakeTable(crc32.Castagnoli)
total := uint32(0)
b.Run("direct", func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
total += crc32.Checksum(data[i%1000], ctab)
}
})
b.Run("hash-reuse", func(b *testing.B) {
b.ReportAllocs()
h := crc32.New(ctab)
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(data[i%1000])
total += h.Sum32()
}
})
b.Run("hash-new", func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
h := crc32.New(ctab)
h.Write(data[i%1000])
total += h.Sum32()
}
})
fmt.Println(total)
}

View file

@ -1,214 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"crypto/rand"
"testing"
"github.com/prometheus/prometheus/model/labels"
)
func BenchmarkMapClone(b *testing.B) {
m := map[string]string{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
for i := 0; i < b.N; i++ {
res := make(map[string]string, len(m))
for k, v := range m {
res[k] = v
}
m = res
}
}
func BenchmarkLabelsClone(b *testing.B) {
m := map[string]string{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
l := labels.FromMap(m)
for i := 0; i < b.N; i++ {
l = l.Copy()
}
}
func BenchmarkLabelMapAccess(b *testing.B) {
m := map[string]string{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
var v string
for k := range m {
b.Run(k, func(b *testing.B) {
for i := 0; i < b.N; i++ {
v = m[k]
}
})
}
_ = v
}
func BenchmarkLabelSetAccess(b *testing.B) {
m := map[string]string{
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
ls := labels.FromMap(m)
var v string
ls.Range(func(l labels.Label) {
b.Run(l.Name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
v = ls.Get(l.Name)
}
})
})
_ = v
}
func BenchmarkStringBytesEquals(b *testing.B) {
randBytes := func(n int) ([]byte, []byte) {
buf1 := make([]byte, n)
if _, err := rand.Read(buf1); err != nil {
b.Fatal(err)
}
buf2 := make([]byte, n)
copy(buf1, buf2)
return buf1, buf2
}
cases := []struct {
name string
f func() ([]byte, []byte)
}{
{
name: "equal",
f: func() ([]byte, []byte) {
return randBytes(60)
},
},
{
name: "1-flip-end",
f: func() ([]byte, []byte) {
b1, b2 := randBytes(60)
b2[59] ^= b2[59]
return b1, b2
},
},
{
name: "1-flip-middle",
f: func() ([]byte, []byte) {
b1, b2 := randBytes(60)
b2[29] ^= b2[29]
return b1, b2
},
},
{
name: "1-flip-start",
f: func() ([]byte, []byte) {
b1, b2 := randBytes(60)
b2[0] ^= b2[0]
return b1, b2
},
},
{
name: "different-length",
f: func() ([]byte, []byte) {
b1, b2 := randBytes(60)
return b1, b2[:59]
},
},
}
for _, c := range cases {
b.Run(c.name+"-strings", func(b *testing.B) {
ab, bb := c.f()
as, bs := string(ab), string(bb)
b.SetBytes(int64(len(as)))
var r bool
for i := 0; i < b.N; i++ {
r = as == bs
}
_ = r
})
b.Run(c.name+"-bytes", func(b *testing.B) {
ab, bb := c.f()
b.SetBytes(int64(len(ab)))
var r bool
for i := 0; i < b.N; i++ {
r = bytes.Equal(ab, bb)
}
_ = r
})
b.Run(c.name+"-bytes-length-check", func(b *testing.B) {
ab, bb := c.f()
b.SetBytes(int64(len(ab)))
var r bool
for i := 0; i < b.N; i++ {
if len(ab) != len(bb) {
continue
}
r = bytes.Equal(ab, bb)
}
_ = r
})
}
}

View file

@ -199,9 +199,10 @@ type wlMetrics struct {
truncateTotal prometheus.Counter
currentSegment prometheus.Gauge
writesFailed prometheus.Counter
walFileSize prometheus.GaugeFunc
}
func newWLMetrics(r prometheus.Registerer) *wlMetrics {
func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics {
m := &wlMetrics{}
m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
@ -233,6 +234,17 @@ func newWLMetrics(r prometheus.Registerer) *wlMetrics {
Name: "writes_failed_total",
Help: "Total number of write log writes that failed.",
})
m.walFileSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "storage_size_bytes",
Help: "Size of the write log directory.",
}, func() float64 {
val, err := w.Size()
if err != nil {
level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir",
"err", err.Error())
}
return float64(val)
})
if r != nil {
r.MustRegister(
@ -243,6 +255,7 @@ func newWLMetrics(r prometheus.Registerer) *wlMetrics {
m.truncateTotal,
m.currentSegment,
m.writesFailed,
m.walFileSize,
)
}
@ -279,7 +292,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
if filepath.Base(dir) == WblDirName {
prefix = "prometheus_tsdb_out_of_order_wbl_"
}
w.metrics = newWLMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg))
w.metrics = newWLMetrics(w, prometheus.WrapRegistererWithPrefix(prefix, reg))
_, last, err := Segments(w.Dir())
if err != nil {

View file

@ -421,7 +421,10 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
defer cancel()
}
opts := extractQueryOpts(r)
opts, err := extractQueryOpts(r)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts)
if err != nil {
return invalidParamError(err, "query")
@ -466,10 +469,18 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
func extractQueryOpts(r *http.Request) *promql.QueryOpts {
return &promql.QueryOpts{
func extractQueryOpts(r *http.Request) (*promql.QueryOpts, error) {
opts := &promql.QueryOpts{
EnablePerStepStats: r.FormValue("stats") == "all",
}
if strDuration := r.FormValue("lookback_delta"); strDuration != "" {
duration, err := parseDuration(strDuration)
if err != nil {
return nil, fmt.Errorf("error parsing lookback delta duration: %w", err)
}
opts.LookbackDelta = duration
}
return opts, nil
}
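
With this change a client can override the engine-wide lookback delta per request. A hedged example of building such a request (endpoint and parameter names as in the diff; the host is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("query", "up")
	params.Set("lookback_delta", "30s") // per-query override added by this change
	params.Set("stats", "all")          // also flows through extractQueryOpts

	u := url.URL{
		Scheme:   "http",
		Host:     "localhost:9090", // illustrative
		Path:     "/api/v1/query",
		RawQuery: params.Encode(),
	}
	fmt.Println(u.String())
	// http://localhost:9090/api/v1/query?lookback_delta=30s&query=up&stats=all
}
```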
func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
@ -513,7 +524,10 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
defer cancel()
}
opts := extractQueryOpts(r)
opts, err := extractQueryOpts(r)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}

View file

@ -3353,3 +3353,66 @@ func (t *testCodec) CanEncode(_ *Response) bool {
func (t *testCodec) Encode(_ *Response) ([]byte, error) {
return []byte(fmt.Sprintf("response from %v codec", t.contentType)), nil
}
func TestExtractQueryOpts(t *testing.T) {
tests := []struct {
name string
form url.Values
expect *promql.QueryOpts
err error
}{
{
name: "with stats all",
form: url.Values{
"stats": []string{"all"},
},
expect: &promql.QueryOpts{
EnablePerStepStats: true,
},
err: nil,
},
{
name: "with stats none",
form: url.Values{
"stats": []string{"none"},
},
expect: &promql.QueryOpts{
EnablePerStepStats: false,
},
err: nil,
},
{
name: "with lookback delta",
form: url.Values{
"stats": []string{"all"},
"lookback_delta": []string{"30s"},
},
expect: &promql.QueryOpts{
EnablePerStepStats: true,
LookbackDelta: 30 * time.Second,
},
err: nil,
},
{
name: "with invalid lookback delta",
form: url.Values{
"lookback_delta": []string{"invalid"},
},
expect: nil,
err: errors.New(`error parsing lookback delta duration: cannot parse "invalid" to a valid duration`),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req := &http.Request{Form: test.form}
opts, err := extractQueryOpts(req)
require.Equal(t, test.expect, opts)
if test.err == nil {
require.NoError(t, err)
} else {
require.Equal(t, test.err.Error(), err.Error())
}
})
}
}

View file

@ -231,7 +231,19 @@ describe('analyzeCompletion test', () => {
title: 'starting to autocomplete labelName in aggregate modifier',
expr: 'sum by ()',
pos: 8, // cursor is between the bracket
expectedContext: [{ kind: ContextKind.LabelName }],
expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }],
},
{
title: 'starting to autocomplete labelName in aggregate modifier with metric name',
expr: 'sum(up) by ()',
pos: 12, // cursor is between ()
expectedContext: [{ kind: ContextKind.LabelName, metricName: 'up' }],
},
{
title: 'starting to autocomplete labelName in aggregate modifier with metric name in front',
expr: 'sum by ()(up)',
pos: 8, // cursor is between ()
expectedContext: [{ kind: ContextKind.LabelName, metricName: 'up' }],
},
{
title: 'continue to autocomplete labelName in aggregate modifier',
@ -243,7 +255,7 @@ describe('analyzeCompletion test', () => {
title: 'autocomplete labelName in a list',
expr: 'sum by (myLabel1,)',
pos: 17, // cursor is between the bracket after the string myLab
expectedContext: [{ kind: ContextKind.LabelName }],
expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }],
},
{
title: 'autocomplete labelName in a list 2',

View file

@ -110,6 +110,26 @@ export interface Context {
matchers?: Matcher[];
}
function getMetricNameInGroupBy(tree: SyntaxNode, state: EditorState): string {
// There should be an AggregateExpr as parent of the GroupingLabels.
// Then we should find the VectorSelector child to be able to find the metric name.
const currentNode: SyntaxNode | null = walkBackward(tree, AggregateExpr);
if (!currentNode) {
return '';
}
let metricName = '';
currentNode.cursor().iterate((node) => {
// Continue until we find the VectorSelector, then look up the metric name.
if (node.type.id === VectorSelector) {
metricName = getMetricNameInVectorSelector(node.node, state);
if (metricName) {
return false;
}
}
});
return metricName;
}
function getMetricNameInVectorSelector(tree: SyntaxNode, state: EditorState): string {
// Find if there is a defined metric name. Should be used to autocomplete a labelValue or a labelName
// First find the parent "VectorSelector" and then its subchild "Identifier", if it exists.
@ -344,9 +364,9 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
break;
case GroupingLabels:
// In this case we are in the given situation:
// sum by ()
// So we have to autocomplete any labelName
result.push({ kind: ContextKind.LabelName });
// sum by () or sum (metric_name) by ()
// so we either autocomplete any labelName or only the labelNames associated with the metric
result.push({ kind: ContextKind.LabelName, metricName: getMetricNameInGroupBy(node, state) });
break;
case LabelMatchers:
// In that case we are in the given situation:

View file

@ -6,6 +6,7 @@ import { BrowserRouter as Router, Redirect, Route, Switch } from 'react-router-d
import { PathPrefixContext } from './contexts/PathPrefixContext';
import { ThemeContext, themeName, themeSetting } from './contexts/ThemeContext';
import { ReadyContext } from './contexts/ReadyContext';
import { AnimateLogoContext } from './contexts/AnimateLogoContext';
import { useLocalStorage } from './hooks/useLocalStorage';
import useMedia from './hooks/useMedia';
import {
@ -60,6 +61,7 @@ const App: FC<AppProps> = ({ consolesLink, agentMode, ready }) => {
const [userTheme, setUserTheme] = useLocalStorage<themeSetting>(themeLocalStorageKey, 'auto');
const browserHasThemes = useMedia('(prefers-color-scheme)');
const browserWantsDarkTheme = useMedia('(prefers-color-scheme: dark)');
const [animateLogo, setAnimateLogo] = useLocalStorage<boolean>('animateLogo', false);
let theme: themeName;
if (userTheme !== 'auto') {
@ -76,46 +78,48 @@ const App: FC<AppProps> = ({ consolesLink, agentMode, ready }) => {
<PathPrefixContext.Provider value={basePath}>
<ReadyContext.Provider value={ready}>
<Router basename={basePath}>
<Navigation consolesLink={consolesLink} agentMode={agentMode} />
<Container fluid style={{ paddingTop: 70 }}>
<Switch>
<Redirect exact from="/" to={agentMode ? '/agent' : '/graph'} />
{/*
<AnimateLogoContext.Provider value={animateLogo}>
<Navigation consolesLink={consolesLink} agentMode={agentMode} animateLogo={animateLogo} />
<Container fluid style={{ paddingTop: 70 }}>
<Switch>
<Redirect exact from="/" to={agentMode ? '/agent' : '/graph'} />
{/*
NOTE: Any route added here needs to also be added to the list of
React-handled router paths ("reactRouterPaths") in /web/web.go.
*/}
<Route path="/agent">
<AgentPage />
</Route>
<Route path="/graph">
<PanelListPage />
</Route>
<Route path="/alerts">
<AlertsPage />
</Route>
<Route path="/config">
<ConfigPage />
</Route>
<Route path="/flags">
<FlagsPage />
</Route>
<Route path="/rules">
<RulesPage />
</Route>
<Route path="/service-discovery">
<ServiceDiscoveryPage />
</Route>
<Route path="/status">
<StatusPage agentMode={agentMode} />
</Route>
<Route path="/tsdb-status">
<TSDBStatusPage />
</Route>
<Route path="/targets">
<TargetsPage />
</Route>
</Switch>
</Container>
<Route path="/agent">
<AgentPage />
</Route>
<Route path="/graph">
<PanelListPage />
</Route>
<Route path="/alerts">
<AlertsPage />
</Route>
<Route path="/config">
<ConfigPage />
</Route>
<Route path="/flags">
<FlagsPage />
</Route>
<Route path="/rules">
<RulesPage />
</Route>
<Route path="/service-discovery">
<ServiceDiscoveryPage />
</Route>
<Route path="/status">
<StatusPage agentMode={agentMode} setAnimateLogo={setAnimateLogo} />
</Route>
<Route path="/tsdb-status">
<TSDBStatusPage />
</Route>
<Route path="/targets">
<TargetsPage />
</Route>
</Switch>
</Container>
</AnimateLogoContext.Provider>
</Router>
</ReadyContext.Provider>
</PathPrefixContext.Provider>

View file

@ -13,21 +13,22 @@ import {
DropdownItem,
} from 'reactstrap';
import { ThemeToggle } from './Theme';
import logo from './images/prometheus_logo_grey.svg';
import { ReactComponent as PromLogo } from './images/prometheus_logo_grey.svg';
interface NavbarProps {
consolesLink: string | null;
agentMode: boolean;
animateLogo?: boolean | false;
}
const Navigation: FC<NavbarProps> = ({ consolesLink, agentMode }) => {
const Navigation: FC<NavbarProps> = ({ consolesLink, agentMode, animateLogo }) => {
const [isOpen, setIsOpen] = useState(false);
const toggle = () => setIsOpen(!isOpen);
return (
<Navbar className="mb-3" dark color="dark" expand="md" fixed="top">
<NavbarToggler onClick={toggle} className="mr-2" />
<Link className="pt-0 navbar-brand" to={agentMode ? '/agent' : '/graph'}>
<img src={logo} className="d-inline-block align-top" alt="Prometheus logo" title="Prometheus" />
<PromLogo className={`d-inline-block align-top${animateLogo ? ' animate' : ''}`} title="Prometheus" />
Prometheus{agentMode && ' Agent'}
</Link>
<Collapse isOpen={isOpen} navbar style={{ justifyContent: 'space-between' }}>

View file

@ -0,0 +1,3 @@
import { createContext } from 'react';
export const AnimateLogoContext = createContext<boolean>(false);

View file

@ -1,4 +1,4 @@
import React, { Fragment, FC } from 'react';
import React, { Fragment, FC, useState, useEffect } from 'react';
import { Table } from 'reactstrap';
import { withStatusIndicator } from '../../components/withStatusIndicator';
import { useFetch } from '../../hooks/useFetch';
@ -97,7 +97,46 @@ const StatusResult: FC<{ fetchPath: string; title: string }> = ({ fetchPath, tit
);
};
const Status: FC<{ agentMode: boolean }> = ({ agentMode }) => {
interface StatusProps {
agentMode?: boolean | false;
setAnimateLogo?: (animateLogo: boolean) => void;
}
const Status: FC<StatusProps> = ({ agentMode, setAnimateLogo }) => {
/* _
* /' \
* | |
* \__/ */
const [inputText, setInputText] = useState('');
useEffect(() => {
const handleKeyPress = (event: KeyboardEvent) => {
const keyPressed = event.key.toUpperCase();
setInputText((prevInputText) => {
const newInputText = prevInputText.slice(-3) + String.fromCharCode(((keyPressed.charCodeAt(0) - 64) % 26) + 65);
return newInputText;
});
};
document.addEventListener('keypress', handleKeyPress);
return () => {
document.removeEventListener('keypress', handleKeyPress);
};
}, []);
useEffect(() => {
if (setAnimateLogo && inputText != '') {
setAnimateLogo(inputText.toUpperCase() === 'QSPN');
}
}, [inputText]);
/* _
* /' \
* | |
* \__/ */
const pathPrefix = usePathPrefix();
const path = `${pathPrefix}/${API_PATH}`;

View file

@ -104,9 +104,14 @@ button.execute-btn {
margin-bottom: 20px;
}
.navbar-brand img {
.navbar-brand svg {
padding-right: 1rem;
height: 1.9rem;
width: 2.9rem;
}
.navbar-brand svg.animate path {
animation: flamecolor 4s ease-in 1 forwards,flame 1s ease-in infinite;
}
.navbar-brand {
@ -357,3 +362,16 @@ input[type='checkbox']:checked + label {
display: block;
font-family: monospace;
}
@keyframes flamecolor {
100% {
fill: #e95224;
}
}
@keyframes flame {
0% { d: path("M 56.667,0.667 C 25.372,0.667 0,26.036 0,57.332 c 0,31.295 25.372,56.666 56.667,56.666 31.295,0 56.666,-25.371 56.666,-56.666 0,-31.296 -25.372,-56.665 -56.666,-56.665 z m 0,106.055 c -8.904,0 -16.123,-5.948 -16.123,-13.283 H 72.79 c 0,7.334 -7.219,13.283 -16.123,13.283 z M 83.297,89.04 H 30.034 V 79.382 H 83.298 V 89.04 Z M 83.106,74.411 H 30.186 C 30.01,74.208 29.83,74.008 29.66,73.802 24.208,67.182 22.924,63.726 21.677,60.204 c -0.021,-0.116 6.611,1.355 11.314,2.413 0,0 2.42,0.56 5.958,1.205 -3.397,-3.982 -5.414,-9.044 -5.414,-14.218 0,-11.359 8.712,-21.285 5.569,-29.308 3.059,0.249 6.331,6.456 6.552,16.161 3.252,-4.494 4.613,-12.701 4.613,-17.733 0,-5.21 3.433,-11.262 6.867,-11.469 -3.061,5.045 0.793,9.37 4.219,20.099 1.285,4.03 1.121,10.812 2.113,15.113 C 63.797,33.534 65.333,20.5 71,16 c -2.5,5.667 0.37,12.758 2.333,16.167 3.167,5.5 5.087,9.667 5.087,17.548 0,5.284 -1.951,10.259 -5.242,14.148 3.742,-0.702 6.326,-1.335 6.326,-1.335 l 12.152,-2.371 c 10e-4,-10e-4 -1.765,7.261 -8.55,14.254 z");
}
50% { d: path("M 56.667,0.667 C 25.372,0.667 0,26.036 0,57.332 c 0,31.295 25.372,56.666 56.667,56.666 31.295,0 56.666,-25.371 56.666,-56.666 0,-31.296 -25.372,-56.665 -56.666,-56.665 z m 0,106.055 c -8.904,0 -16.123,-5.948 -16.123,-13.283 H 72.79 c 0,7.334 -7.219,13.283 -16.123,13.283 z M 83.297,89.04 H 30.034 V 79.382 H 83.298 V 89.04 Z M 83.106,74.411 H 30.186 C 30.01,74.208 29.83,74.008 29.66,73.802 24.208,67.182 22.924,63.726 21.677,60.204 c -0.021,-0.116 6.611,1.355 11.314,2.413 0,0 2.42,0.56 5.958,1.205 -3.397,-3.982 -5.414,-9.044 -5.414,-14.218 0,-11.359 1.640181,-23.047128 7.294982,-29.291475 C 39.391377,29.509803 45.435,26.752 45.656,36.457 c 3.252,-4.494 7.100362,-8.366957 7.100362,-13.398957 0,-5.21 0.137393,-8.650513 -3.479689,-15.0672265 7.834063,1.6180944 8.448052,4.2381285 11.874052,14.9671285 1.285,4.03 1.325275,15.208055 2.317275,19.509055 0.329,-8.933 6.441001,-14.01461 5.163951,-21.391003 5.755224,5.771457 4.934508,10.495521 7.126537,14.288218 3.167,5.5 2.382625,7.496239 2.382625,15.377239 0,5.284 -1.672113,9.232546 -4.963113,13.121546 3.742,-0.702 6.326,-1.335 6.326,-1.335 l 12.152,-2.371 c 10e-4,-10e-4 -1.765,7.261 -8.55,14.254 z"); }
100% {
d: path("M 56.667,0.667 C 25.372,0.667 0,26.036 0,57.332 c 0,31.295 25.372,56.666 56.667,56.666 31.295,0 56.666,-25.371 56.666,-56.666 0,-31.296 -25.372,-56.665 -56.666,-56.665 z m 0,106.055 c -8.904,0 -16.123,-5.948 -16.123,-13.283 H 72.79 c 0,7.334 -7.219,13.283 -16.123,13.283 z M 83.297,89.04 H 30.034 V 79.382 H 83.298 V 89.04 Z M 83.106,74.411 H 30.186 C 30.01,74.208 29.83,74.008 29.66,73.802 24.208,67.182 22.924,63.726 21.677,60.204 c -0.021,-0.116 6.611,1.355 11.314,2.413 0,0 2.42,0.56 5.958,1.205 -3.397,-3.982 -5.414,-9.044 -5.414,-14.218 0,-11.359 8.712,-21.285 5.569,-29.308 3.059,0.249 6.331,6.456 6.552,16.161 3.252,-4.494 4.613,-12.701 4.613,-17.733 0,-5.21 3.433,-11.262 6.867,-11.469 -3.061,5.045 0.793,9.37 4.219,20.099 1.285,4.03 1.121,10.812 2.113,15.113 C 63.797,33.534 65.333,20.5 71,16 c -2.5,5.667 0.37,12.758 2.333,16.167 3.167,5.5 5.087,9.667 5.087,17.548 0,5.284 -1.951,10.259 -5.242,14.148 3.742,-0.702 6.326,-1.335 6.326,-1.335 l 12.152,-2.371 c 10e-4,-10e-4 -1.765,7.261 -8.55,14.254 z"); }
}