Merge branch 'prometheus:main' into docs-deriv

Matthew 2022-02-19 08:37:09 -05:00 committed by GitHub
commit 00578d245b
140 changed files with 5084 additions and 4689 deletions

View file

@@ -37,6 +37,7 @@ jobs:
       GOMAXPROCS: "2"
       GO111MODULE: "on"
     - run: go test ./tsdb/ -test.tsdb-isolation=false
+    - run: make -C documentation/examples/remote_storage
     - prometheus/check_proto:
         version: "3.15.8"
     - prometheus/store_artifact:
@@ -180,10 +181,10 @@ workflows:
       branches:
         ignore: /.*/
       image: circleci/golang:1-node
-  nightly:
+  daily:
     triggers:
       - schedule:
-          cron: "0 0 * * *"
+          cron: "49 19 * * *"
           filters:
             branches:
               only:

.github/workflows/buf-lint.yml vendored Normal file
View file

@@ -0,0 +1,20 @@
+name: buf.build
+on:
+  pull_request:
+    paths:
+      - ".github/workflows/buf-lint.yml"
+      - "**.proto"
+jobs:
+  buf:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: bufbuild/buf-setup-action@v0.6.0
+      - uses: bufbuild/buf-lint-action@v1
+        with:
+          input: 'prompb'
+      - uses: bufbuild/buf-breaking-action@v1
+        with:
+          input: 'prompb'
+          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'

.github/workflows/buf.yml vendored Normal file
View file

@@ -0,0 +1,23 @@
+name: buf.build
+on:
+  push:
+    branches:
+      - main
+jobs:
+  buf:
+    name: lint and publish
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: bufbuild/buf-setup-action@v0.6.0
+      - uses: bufbuild/buf-lint-action@v1
+        with:
+          input: 'prompb'
+      - uses: bufbuild/buf-breaking-action@v1
+        with:
+          input: 'prompb'
+          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
+      - uses: bufbuild/buf-push-action@v1
+        with:
+          input: 'prompb'
+          buf_token: ${{ secrets.BUF_TOKEN }}

View file

@@ -24,13 +24,11 @@ linters-settings:
   depguard:
     list-type: blacklist
     include-go-root: true
-    packages:
-      - sync/atomic
-      - github.com/stretchr/testify/assert
     packages-with-error-message:
       - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
       - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
       - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+      - regexp: "Use github.com/grafana/regexp instead of regexp"
   errcheck:
     exclude: scripts/errcheck_excludes.txt
   goimports:

View file

@@ -1,4 +1,17 @@
-## 2.33.0-rc.0 / 2022-01-12
+## 2.33.3 / 2022-02-11
+
+* [BUGFIX] Azure SD: Fix a regression when public IP Address isn't set. #10289
+
+## 2.33.2 / 2022-02-11
+
+* [BUGFIX] Azure SD: Fix panic when public IP Address isn't set. #10280
+* [BUGFIX] Remote-write: Fix deadlock when stopping a shard. #10279
+
+## 2.33.1 / 2022-02-02
+
+* [BUGFIX] SD: Fix _no such file or directory_ in K8s SD when not running inside K8s. #10235
+
+## 2.33.0 / 2022-01-29
 
 * [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
@@ -25,6 +38,7 @@
 * [BUGFIX] UI: Fix overlapping click targets for the alert state checkboxes. #10136
 * [BUGFIX] UI: Fix _Unhealthy_ filter on target page to actually display only _Unhealthy_ targets. #10103
 * [BUGFIX] UI: Fix autocompletion when expression is empty. #10053
+* [BUGFIX] TSDB: Fix deadlock from simultaneous GC and write. #10166
 
 ## 2.32.1 / 2021-12-17

View file

@@ -1 +1 @@
-2.33.0-rc.0
+2.33.3

View file

@@ -17,7 +17,6 @@ package main
 import (
     "context"
     "fmt"
-    "io"
     "math"
     "math/bits"
     "net"
@@ -27,7 +26,6 @@ import (
     "os"
     "os/signal"
     "path/filepath"
-    "regexp"
     "runtime"
     "strings"
     "sync"
@@ -37,9 +35,9 @@ import (
     "github.com/alecthomas/units"
     "github.com/go-kit/log"
    "github.com/go-kit/log/level"
+    "github.com/grafana/regexp"
     conntrack "github.com/mwitkow/go-conntrack"
     "github.com/oklog/run"
-    "github.com/opentracing/opentracing-go"
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
@@ -48,8 +46,6 @@ import (
     "github.com/prometheus/common/version"
     toolkit_web "github.com/prometheus/exporter-toolkit/web"
     toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
-    jcfg "github.com/uber/jaeger-client-go/config"
-    jprom "github.com/uber/jaeger-lib/metrics/prometheus"
     "go.uber.org/atomic"
     kingpin "gopkg.in/alecthomas/kingpin.v2"
     klog "k8s.io/klog"
@@ -69,6 +65,7 @@ import (
     "github.com/prometheus/prometheus/scrape"
     "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/storage/remote"
+    "github.com/prometheus/prometheus/tracing"
     "github.com/prometheus/prometheus/tsdb"
     "github.com/prometheus/prometheus/tsdb/agent"
     "github.com/prometheus/prometheus/util/logging"
@@ -431,7 +428,11 @@ func main() {
     // Throw error for invalid config before starting other components.
     var cfgFile *config.Config
     if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
-        level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
+        absPath, pathErr := filepath.Abs(cfg.configFile)
+        if pathErr != nil {
+            absPath = cfg.configFile
+        }
+        level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
         os.Exit(2)
     }
     if cfg.tsdb.EnableExemplarStorage {
@@ -549,7 +550,8 @@ func main() {
     }
 
     var (
         scrapeManager  = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
+        tracingManager = tracing.NewManager(logger)
 
         queryEngine *promql.Engine
         ruleManager *rules.Manager
@@ -716,6 +718,9 @@ func main() {
                 externalURL,
             )
         },
+    }, {
+        name:     "tracing",
+        reloader: tracingManager.ApplyConfig,
     },
 }
 
@@ -742,13 +747,6 @@ func main() {
         })
     }
 
-    closer, err := initTracing(logger)
-    if err != nil {
-        level.Error(logger).Log("msg", "Unable to init tracing", "err", err)
-        os.Exit(2)
-    }
-    defer closer.Close()
-
     listener, err := webHandler.Listener()
     if err != nil {
         level.Error(logger).Log("msg", "Unable to start web listener", "err", err)
@@ -836,6 +834,19 @@ func main() {
             },
         )
     }
+    {
+        // Tracing manager.
+        g.Add(
+            func() error {
+                <-reloadReady.C
+                tracingManager.Run()
+                return nil
+            },
+            func(err error) {
+                tracingManager.Stop()
+            },
+        )
+    }
     {
         // Reload handler.
@@ -1522,47 +1533,6 @@ func (opts agentOptions) ToAgentOptions() agent.Options {
     }
 }
 
-func initTracing(logger log.Logger) (io.Closer, error) {
-    // Set tracing configuration defaults.
-    cfg := &jcfg.Configuration{
-        ServiceName: "prometheus",
-        Disabled:    true,
-    }
-
-    // Available options can be seen here:
-    // https://github.com/jaegertracing/jaeger-client-go#environment-variables
-    cfg, err := cfg.FromEnv()
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to get tracing config from environment")
-    }
-
-    jLogger := jaegerLogger{logger: log.With(logger, "component", "tracing")}
-
-    tracer, closer, err := cfg.NewTracer(
-        jcfg.Logger(jLogger),
-        jcfg.Metrics(jprom.New()),
-    )
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to init tracing")
-    }
-
-    opentracing.SetGlobalTracer(tracer)
-    return closer, nil
-}
-
-type jaegerLogger struct {
-    logger log.Logger
-}
-
-func (l jaegerLogger) Error(msg string) {
-    level.Error(l.logger).Log("msg", msg)
-}
-
-func (l jaegerLogger) Infof(msg string, args ...interface{}) {
-    keyvals := []interface{}{"msg", fmt.Sprintf(msg, args...)}
-    level.Info(l.logger).Log(keyvals...)
-}
-
 // discoveryManager interfaces the discovery manager. This is used to keep using
 // the manager that restarts SD's on reload for a few releases until we feel
 // the new manager can be enabled for all users.

View file

@@ -19,13 +19,13 @@ import (
     "net/url"
     "os"
     "path/filepath"
-    "regexp"
     "strings"
     "time"
 
     "github.com/alecthomas/units"
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
+    "github.com/grafana/regexp"
     "github.com/pkg/errors"
     "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"
@@ -83,6 +83,9 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
     for i, v := range cfg.GlobalConfig.ExternalLabels {
         newV := os.Expand(v.Value, func(s string) string {
+            if s == "$" {
+                return "$"
+            }
             if v := os.Getenv(s); v != "" {
                 return v
             }
@@ -200,8 +203,9 @@ var (
     // DefaultRemoteReadConfig is the default remote read configuration.
     DefaultRemoteReadConfig = RemoteReadConfig{
         RemoteTimeout:        model.Duration(1 * time.Minute),
         HTTPClientConfig:     config.DefaultHTTPClientConfig,
+        FilterExternalLabels: true,
     }
 
     // DefaultStorageConfig is the default TSDB/Exemplar storage configuration.
@@ -221,6 +225,7 @@ type Config struct {
     RuleFiles     []string        `yaml:"rule_files,omitempty"`
     ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
     StorageConfig StorageConfig   `yaml:"storage,omitempty"`
+    TracingConfig TracingConfig   `yaml:"tracing,omitempty"`
 
     RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
     RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
@@ -230,6 +235,7 @@ type Config struct {
 func (c *Config) SetDirectory(dir string) {
     c.GlobalConfig.SetDirectory(dir)
     c.AlertingConfig.SetDirectory(dir)
+    c.TracingConfig.SetDirectory(dir)
     for i, file := range c.RuleFiles {
         c.RuleFiles[i] = config.JoinDir(dir, file)
     }
@@ -499,6 +505,62 @@ type StorageConfig struct {
     ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"`
 }
 
+type TracingClientType string
+
+const (
+    TracingClientHTTP TracingClientType = "http"
+    TracingClientGRPC TracingClientType = "grpc"
+)
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (t *TracingClientType) UnmarshalYAML(unmarshal func(interface{}) error) error {
+    *t = TracingClientType("")
+    type plain TracingClientType
+    if err := unmarshal((*plain)(t)); err != nil {
+        return err
+    }
+
+    if *t != TracingClientHTTP && *t != TracingClientGRPC {
+        return fmt.Errorf("expected tracing client type to be %s or %s, but got %s",
+            TracingClientHTTP, TracingClientGRPC, *t,
+        )
+    }
+
+    return nil
+}
+
+// TracingConfig configures the tracing options.
+type TracingConfig struct {
+    ClientType       TracingClientType `yaml:"client_type,omitempty"`
+    Endpoint         string            `yaml:"endpoint,omitempty"`
+    SamplingFraction float64           `yaml:"sampling_fraction,omitempty"`
+    Insecure         bool              `yaml:"insecure,omitempty"`
+    TLSConfig        config.TLSConfig  `yaml:"tls_config,omitempty"`
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (t *TracingConfig) SetDirectory(dir string) {
+    t.TLSConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (t *TracingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+    *t = TracingConfig{
+        ClientType: TracingClientGRPC,
+        Insecure:   true,
+    }
+    type plain TracingConfig
+    if err := unmarshal((*plain)(t)); err != nil {
+        return err
+    }
+
+    if t.Endpoint == "" {
+        return errors.New("tracing endpoint must be set")
+    }
+
+    return nil
+}
+
 // ExemplarsConfig configures runtime reloadable configuration options.
 type ExemplarsConfig struct {
     // MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory.
@@ -793,6 +855,9 @@ type RemoteReadConfig struct {
     // RequiredMatchers is an optional list of equality matchers which have to
     // be present in a selector to query the remote read endpoint.
     RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"`
+
+    // Whether to use the external labels as selectors for the remote read endpoint.
+    FilterExternalLabels bool `yaml:"filter_external_labels,omitempty"`
 }
 
 // SetDirectory joins any relative file paths with dir.
// SetDirectory joins any relative file paths with dir. // SetDirectory joins any relative file paths with dir.

View file

@@ -19,12 +19,12 @@ import (
     "net/url"
     "os"
     "path/filepath"
-    "regexp"
     "testing"
     "time"
 
     "github.com/alecthomas/units"
     "github.com/go-kit/log"
+    "github.com/grafana/regexp"
     "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"
     "github.com/stretchr/testify/require"
@@ -130,11 +132,12 @@ var expectedConf = &Config{
     RemoteReadConfigs: []*RemoteReadConfig{
         {
             URL:                  mustParseURL("http://remote1/read"),
             RemoteTimeout:        model.Duration(1 * time.Minute),
             ReadRecent:           true,
             Name:                 "default",
             HTTPClientConfig:     config.DefaultHTTPClientConfig,
+            FilterExternalLabels: true,
         },
         {
             URL: mustParseURL("http://remote3/read"),
@@ -149,6 +150,7 @@ var expectedConf = &Config{
             },
             FollowRedirects: true,
         },
+        FilterExternalLabels: true,
     },
 },
@@ -985,6 +987,11 @@ var expectedConf = &Config{
             },
         },
     },
+    TracingConfig: TracingConfig{
+        Endpoint:   "localhost:4317",
+        ClientType: TracingClientGRPC,
+        Insecure:   true,
+    },
 }
 
 func TestYAMLRoundtrip(t *testing.T) {
@@ -1181,6 +1188,14 @@ var expectedErrors = []struct {
         filename: "kubernetes_http_config_without_api_server.bad.yml",
         errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
     },
+    {
+        filename: "kubernetes_kubeconfig_with_own_namespace.bad.yml",
+        errMsg:   "cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously",
+    },
+    {
+        filename: "kubernetes_api_server_with_own_namespace.bad.yml",
+        errMsg:   "cannot use 'api_server' and 'namespaces.own_namespace' simultaneously",
+    },
     {
         filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
         errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
@@ -1433,6 +1448,10 @@ var expectedErrors = []struct {
         filename: "empty_scrape_config_action.bad.yml",
         errMsg:   "relabel action cannot be empty",
     },
+    {
+        filename: "tracing.bad.yml",
+        errMsg:   "tracing endpoint must be set",
+    },
     {
         filename: "uyuni_no_server.bad.yml",
         errMsg:   "Uyuni SD configuration requires server host",
@@ -1480,12 +1499,16 @@ func TestExpandExternalLabels(t *testing.T) {
     require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
     require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1])
     require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2])
+    require.Equal(t, labels.Label{Name: "qux", Value: "foo$${TEST}"}, c.GlobalConfig.ExternalLabels[3])
+    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$$bar"}, c.GlobalConfig.ExternalLabels[4])
 
     c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
     require.NoError(t, err)
     require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
     require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1])
     require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2])
+    require.Equal(t, labels.Label{Name: "qux", Value: "foo${TEST}"}, c.GlobalConfig.ExternalLabels[3])
+    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$bar"}, c.GlobalConfig.ExternalLabels[4])
 
     os.Setenv("TEST", "TestValue")
     c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
@@ -1493,6 +1516,8 @@ func TestExpandExternalLabels(t *testing.T) {
     require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
     require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1])
     require.Equal(t, labels.Label{Name: "foo", Value: "TestValue"}, c.GlobalConfig.ExternalLabels[2])
+    require.Equal(t, labels.Label{Name: "qux", Value: "foo${TEST}"}, c.GlobalConfig.ExternalLabels[3])
+    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$bar"}, c.GlobalConfig.ExternalLabels[4])
 }
 
 func TestEmptyGlobalBlock(t *testing.T) {
func TestEmptyGlobalBlock(t *testing.T) { func TestEmptyGlobalBlock(t *testing.T) {

View file

@@ -366,3 +366,7 @@ alerting:
       - "1.2.3.4:9093"
       - "1.2.3.5:9093"
       - "1.2.3.6:9093"
+
+tracing:
+  endpoint: "localhost:4317"
+  client_type: "grpc"

View file

@@ -3,3 +3,5 @@ global:
     bar: foo
     foo: ${TEST}
     baz: foo${TEST}bar
+    qux: foo$${TEST}
+    xyz: foo$$bar

View file

@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        api_server: 'https://localhost:1234'
+        namespaces:
+          own_namespace: true

View file

@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        kubeconfig_file: /home/User1/.kubeconfig
+        namespaces:
+          own_namespace: true

config/testdata/tracing.bad.yml vendored Normal file
View file

@@ -0,0 +1,2 @@
+tracing:
+  sampling_fraction: 1

View file

@@ -32,6 +32,7 @@ import (
     "github.com/pkg/errors"
     config_util "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"
+    "github.com/prometheus/common/version"
 
     "github.com/prometheus/prometheus/discovery"
     "github.com/prometheus/prometheus/discovery/refresh"
@@ -58,14 +59,18 @@ const (
     authMethodManagedIdentity = "ManagedIdentity"
 )
 
-// DefaultSDConfig is the default Azure SD configuration.
-var DefaultSDConfig = SDConfig{
-    Port:                 80,
-    RefreshInterval:      model.Duration(5 * time.Minute),
-    Environment:          azure.PublicCloud.Name,
-    AuthenticationMethod: authMethodOAuth,
-    HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
-}
+var (
+    userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+
+    // DefaultSDConfig is the default Azure SD configuration.
+    DefaultSDConfig = SDConfig{
+        Port:                 80,
+        RefreshInterval:      model.Duration(5 * time.Minute),
+        Environment:          azure.PublicCloud.Name,
+        AuthenticationMethod: authMethodOAuth,
+        HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
+    }
+)
 
 func init() {
     discovery.RegisterConfig(&SDConfig{})
@@ -208,24 +213,29 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
         return azureClient{}, err
     }
     sender := autorest.DecorateSender(client)
+    preparer := autorest.WithUserAgent(userAgent)
 
     bearerAuthorizer := autorest.NewBearerAuthorizer(spt)
 
     c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
     c.vm.Authorizer = bearerAuthorizer
     c.vm.Sender = sender
+    c.vm.RequestInspector = preparer
 
     c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
     c.nic.Authorizer = bearerAuthorizer
     c.nic.Sender = sender
+    c.nic.RequestInspector = preparer
 
     c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
     c.vmss.Authorizer = bearerAuthorizer
-    c.vm.Sender = sender
+    c.vmss.Sender = sender
+    c.vmss.RequestInspector = preparer
 
     c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
     c.vmssvm.Authorizer = bearerAuthorizer
     c.vmssvm.Sender = sender
+    c.vmssvm.RequestInspector = preparer
 
     return c, nil
 }
@@ -361,7 +371,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
             if *networkInterface.Primary {
                 for _, ip := range *networkInterface.IPConfigurations {
-                    if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil {
+                    // IPAddress is a field defined in PublicIPAddressPropertiesFormat,
+                    // therefore we need to validate that both are not nil.
+                    if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil && ip.PublicIPAddress.IPAddress != nil {
                         labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.PublicIPAddress.IPAddress)
                     }
                     if ip.PrivateIPAddress != nil {
@@ -537,7 +549,8 @@ func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*network.Interface, error) {
         autorest.AsGet(),
         autorest.WithBaseURL(client.nic.BaseURI),
         autorest.WithPath(networkInterfaceID),
-        autorest.WithQueryParameters(queryParameters))
+        autorest.WithQueryParameters(queryParameters),
+        autorest.WithUserAgent(userAgent))
     req, err := preparer.Prepare((&http.Request{}).WithContext(ctx))
     if err != nil {
         return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")

View file

@@ -138,7 +138,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
         Source: "DigitalOcean",
     }
 
-    droplets, err := d.listDroplets()
+    droplets, err := d.listDroplets(ctx)
     if err != nil {
         return nil, err
     }
@@ -196,13 +196,13 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
     return []*targetgroup.Group{tg}, nil
 }
 
-func (d *Discovery) listDroplets() ([]godo.Droplet, error) {
+func (d *Discovery) listDroplets(ctx context.Context) ([]godo.Droplet, error) {
     var (
         droplets []godo.Droplet
         opts     = &godo.ListOptions{}
     )
     for {
-        paginatedDroplets, resp, err := d.client.Droplets.List(context.Background(), opts)
+        paginatedDroplets, resp, err := d.client.Droplets.List(ctx, opts)
         if err != nil {
             return nil, fmt.Errorf("error while listing droplets page %d: %w", opts.Page, err)
         }

View file

@@ -20,7 +20,6 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
-    "regexp"
     "strings"
     "sync"
     "time"
@@ -28,6 +27,7 @@ import (
     "github.com/fsnotify/fsnotify"
     "github.com/go-kit/log"
     "github.com/go-kit/log/level"
+    "github.com/grafana/regexp"
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/config"

View file

@@ -53,12 +53,9 @@ type testRunner struct {
 func newTestRunner(t *testing.T) *testRunner {
     t.Helper()
 
-    tmpDir, err := ioutil.TempDir("", "prometheus-file-sd")
-    require.NoError(t, err)
-
     return &testRunner{
         T:       t,
-        dir:     tmpDir,
+        dir:     t.TempDir(),
         ch:      make(chan []*targetgroup.Group),
         done:    make(chan struct{}),
         stopped: make(chan struct{}),

View file

@@ -21,12 +21,12 @@ import (
     "io/ioutil"
     "net/http"
     "net/url"
-    "regexp"
     "strconv"
     "strings"
     "time"
 
     "github.com/go-kit/log"
+    "github.com/grafana/regexp"
     "github.com/pkg/errors"
     "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"

View file

@@ -184,6 +184,12 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
         return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
     }
+    if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
+        return errors.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
+    }
+    if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
+        return errors.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
+    }
 
     foundSelectorRoles := make(map[Role]struct{})
     allowedSelectors := map[Role][]string{
@@ -263,7 +269,7 @@ func (d *Discovery) getNamespaces() []string {
         return []string{apiv1.NamespaceAll}
     }
 
-    if includeOwnNamespace {
+    if includeOwnNamespace && d.ownNamespace != "" {
         return append(namespaces, d.ownNamespace)
     }
@@ -276,8 +282,9 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
         l = log.NewNopLogger()
     }
     var (
         kcfg         *rest.Config
         err          error
+        ownNamespace string
     )
     if conf.KubeConfig != "" {
         kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
@@ -291,6 +298,18 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
         if err != nil {
             return nil, err
         }
+
+        if conf.NamespaceDiscovery.IncludeOwnNamespace {
+            ownNamespaceContents, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+            if err != nil {
+                return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
+            }
+            if len(ownNamespaceContents) == 0 {
+                return nil, errors.New("could not read own namespace name (empty file)")
+            }
+            ownNamespace = string(ownNamespaceContents)
+        }
+
         level.Info(l).Log("msg", "Using pod service account via in-cluster config")
     } else {
         rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
@@ -310,11 +329,6 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
         return nil, err
     }
 
-    ownNamespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
-    if err != nil {
-        return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
-    }
-
     return &Discovery{
         client:             c,
         logger:             l,
@@ -322,7 +336,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
         namespaceDiscovery: &conf.NamespaceDiscovery,
         discoverers:        make([]discovery.Discoverer, 0),
         selectors:          mapSelector(conf.Selectors),
-        ownNamespace:       string(ownNamespace),
+        ownNamespace:       ownNamespace,
     }, nil
 }

View file

@@ -24,12 +24,12 @@ import (
     "net/http"
     "net/url"
     "path"
-    "regexp"
     "strconv"
     "strings"
     "time"
 
     "github.com/go-kit/log"
+    "github.com/grafana/regexp"
     "github.com/pkg/errors"
     "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"

View file

@@ -91,3 +91,18 @@ func (tg *Group) UnmarshalJSON(b []byte) error {
     tg.Labels = g.Labels
     return nil
 }
+
+// MarshalJSON implements the json.Marshaler interface.
+func (tg Group) MarshalJSON() ([]byte, error) {
+    g := &struct {
+        Targets []string       `json:"targets"`
+        Labels  model.LabelSet `json:"labels,omitempty"`
+    }{
+        Targets: make([]string, 0, len(tg.Targets)),
+        Labels:  tg.Labels,
+    }
+    for _, t := range tg.Targets {
+        g.Targets = append(g.Targets, string(t[model.AddressLabel]))
+    }
+    return json.Marshal(g)
+}

View file

@@ -22,7 +22,7 @@ import (
     "gopkg.in/yaml.v2"
 )
 
-func TestTargetGroupStrictJsonUnmarshal(t *testing.T) {
+func TestTargetGroupStrictJSONUnmarshal(t *testing.T) {
     tests := []struct {
         json          string
         expectedReply error
@@ -59,6 +59,39 @@ func TestTargetGroupStrictJSONUnmarshal(t *testing.T) {
     }
 }
 
+func TestTargetGroupJSONMarshal(t *testing.T) {
+    tests := []struct {
+        expectedJSON string
+        expectedErr  error
+        group        Group
+    }{
+        {
+            // labels should be omitted if empty.
+            group:        Group{},
+            expectedJSON: `{"targets": []}`,
+            expectedErr:  nil,
+        },
+        {
+            // targets only exposes addresses.
+            group: Group{
+                Targets: []model.LabelSet{
+                    {"__address__": "localhost:9090"},
+                    {"__address__": "localhost:9091"},
+                },
+                Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
+            },
+            expectedJSON: `{"targets": ["localhost:9090", "localhost:9091"], "labels": {"bar": "baz", "foo": "bar"}}`,
+            expectedErr:  nil,
+        },
+    }
+
+    for _, test := range tests {
+        actual, err := test.group.MarshalJSON()
+        require.Equal(t, test.expectedErr, err)
+        require.JSONEq(t, test.expectedJSON, string(actual))
+    }
+}
+
 func TestTargetGroupYamlMarshal(t *testing.T) {
     marshal := func(g interface{}) []byte {
         d, err := yaml.Marshal(g)

View file

@@ -23,7 +23,6 @@ import (
     "time"
 
     "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
     "github.com/kolo/xmlrpc"
     "github.com/pkg/errors"
     "github.com/prometheus/common/config"
@@ -47,6 +46,8 @@ const (
     uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module"
     uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path"
     uyuniLabelScheme      = uyuniMetaLabelPrefix + "scheme"
+
+    tokenDuration = 10 * time.Minute
 )
 
 // DefaultSDConfig is the default Uyuni SD configuration.
@@ -96,14 +97,16 @@ type endpointInfo struct {
 // Discovery periodically performs Uyuni API requests. It implements the Discoverer interface.
 type Discovery struct {
     *refresh.Discovery
     apiURL          *url.URL
     roundTripper    http.RoundTripper
     username        string
     password        string
+    token           string
+    tokenExpiration time.Time
     entitlement     string
     separator       string
     interval        time.Duration
     logger          log.Logger
 }
 
 // Name returns the name of the Config.
@@ -140,16 +143,12 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }
 
-func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) {
+func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) {
     var result string
-    err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
+    err := rpcclient.Call("auth.login", []interface{}{user, pass, duration}, &result)
     return result, err
 }
 
-func logout(rpcclient *xmlrpc.Client, token string) error {
-    return rpcclient.Call("auth.logout", token, nil)
-}
-
 func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) {
     var systemGroupsInfos []struct {
         SystemID int `xmlrpc:"id"`
@@ -271,12 +270,11 @@ func getSystemGroupNames(systemGroupsIDs []systemGroupID) []string {
 func (d *Discovery) getTargetsForSystems(
     rpcClient *xmlrpc.Client,
-    token string,
     entitlement string,
 ) ([]model.LabelSet, error) {
     result := make([]model.LabelSet, 0)
 
-    systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement)
+    systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, d.token, entitlement)
     if err != nil {
         return nil, errors.Wrap(err, "unable to get the managed system groups information of monitored clients")
     }
@@ -286,12 +284,12 @@ func (d *Discovery) getTargetsForSystems(
         systemIDs = append(systemIDs, systemID)
     }
 
-    endpointInfos, err := getEndpointInfoForSystems(rpcClient, token, systemIDs)
+    endpointInfos, err := getEndpointInfoForSystems(rpcClient, d.token, systemIDs)
     if err != nil {
         return nil, errors.Wrap(err, "unable to get endpoints information")
     }
 
-    networkInfoBySystemID, err := getNetworkInformationForSystems(rpcClient, token, systemIDs)
+    networkInfoBySystemID, err := getNetworkInformationForSystems(rpcClient, d.token, systemIDs)
     if err != nil {
         return nil, errors.Wrap(err, "unable to get the systems network information")
     }
@@ -308,25 +306,27 @@ func (d *Discovery) getTargetsForSystems(
     return result, nil
 }
 
-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *Discovery) refresh(_ context.Context) ([]*targetgroup.Group, error) {
     rpcClient, err := xmlrpc.NewClient(d.apiURL.String(), d.roundTripper)
     if err != nil {
         return nil, err
     }
     defer rpcClient.Close()
 
-    token, err := login(rpcClient, d.username, d.password)
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to login to Uyuni API")
-    }
-    defer func() {
-        if err := logout(rpcClient, token); err != nil {
-            level.Debug(d.logger).Log("msg", "Failed to log out from Uyuni API", "err", err)
-        }
-    }()
+    if time.Now().After(d.tokenExpiration) {
+        // Uyuni API takes duration in seconds.
+        d.token, err = login(rpcClient, d.username, d.password, int(tokenDuration.Seconds()))
+        if err != nil {
+            return nil, errors.Wrap(err, "unable to login to Uyuni API")
+        }
+        // Login again at half the token lifetime.
+        d.tokenExpiration = time.Now().Add(tokenDuration / 2)
+    }
 
-    targetsForSystems, err := d.getTargetsForSystems(rpcClient, token, d.entitlement)
+    targetsForSystems, err := d.getTargetsForSystems(rpcClient, d.entitlement)
     if err != nil {
+        // Force the renewal of the token on next refresh.
+        d.tokenExpiration = time.Now()
         return nil, err
     }

View file

@@ -19,6 +19,7 @@ import (
     "net/http"
     "net/http/httptest"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/require"
@@ -56,3 +57,67 @@ func TestUyuniSDHandleError(t *testing.T) {
     require.EqualError(t, err, errTesting)
     require.Equal(t, len(tgs), 0)
 }
+
+func TestUyuniSDLogin(t *testing.T) {
+    var (
+        errTesting  = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500"
+        call        = 0
+        respHandler = func(w http.ResponseWriter, r *http.Request) {
+            w.Header().Set("Content-Type", "application/xml")
+            switch call {
+            case 0:
+                w.WriteHeader(http.StatusOK)
+                io.WriteString(w, `<?xml version="1.0"?>
+<methodResponse>
+  <params>
+    <param>
+      <value>
+        a token
+      </value>
+    </param>
+  </params>
+</methodResponse>`)
+            case 1:
+                w.WriteHeader(http.StatusInternalServerError)
+                io.WriteString(w, ``)
+            }
+            call++
+        }
+    )
+    tgs, err := testUpdateServices(respHandler)
+
+    require.EqualError(t, err, errTesting)
+    require.Equal(t, len(tgs), 0)
+}
+
+func TestUyuniSDSkipLogin(t *testing.T) {
+    var (
+        errTesting  = "unable to get the managed system groups information of monitored clients: request error: bad status code - 500"
+        respHandler = func(w http.ResponseWriter, r *http.Request) {
+            w.WriteHeader(http.StatusInternalServerError)
+            w.Header().Set("Content-Type", "application/xml")
+            io.WriteString(w, ``)
+        }
+    )
+
+    // Create a test server with mock HTTP handler.
+    ts := httptest.NewServer(http.HandlerFunc(respHandler))
+    defer ts.Close()
+
+    conf := SDConfig{
+        Server: ts.URL,
+    }
+
+    md, err := NewDiscovery(&conf, nil)
+    if err != nil {
+        t.Error(err)
+    }
+
+    // simulate a cached token
+    md.token = `a token`
+    md.tokenExpiration = time.Now().Add(time.Minute)
+
+    tgs, err := md.refresh(context.Background())
+
+    require.EqualError(t, err, errTesting)
+    require.Equal(t, len(tgs), 0)
+}

View file

@@ -34,6 +34,7 @@ Generic placeholders are defined as follows:
 * `<boolean>`: a boolean that can take the values `true` or `false`
 * `<duration>`: a duration matching the regular expression `((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0)`, e.g. `1d`, `1h30m`, `5m`, `10s`
 * `<filename>`: a valid path in the current working directory
+* `<float>`: a floating-point number
 * `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
 * `<int>`: an integer value
 * `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`
@@ -99,6 +100,10 @@ remote_read:
 # Storage related settings that are runtime reloadable.
 storage:
   [ - <exemplars> ... ]
+
+# Configures exporting traces.
+tracing:
+  [ <tracing_config> ]
 ```

 ### `<scrape_config>`
@@ -191,7 +196,7 @@ oauth2:
   [ <oauth2> ]

 # Configure whether scrape requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # Configures the scrape request's TLS settings.
 tls_config:
@@ -438,7 +443,7 @@ subscription_id: <string>
 # instead be specified in the relabeling rule.
 [ port: <int> | default = 80 ]

-# Authentication information used to authenticate to the consul server.
+# Authentication information used to authenticate to the Azure API.
 # Note that `basic_auth`, `authorization` and `oauth2` options are
 # mutually exclusive.
 # `password` and `password_file` are mutually exclusive.
@@ -468,7 +473,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -564,7 +569,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -639,7 +644,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -698,9 +703,7 @@ tls_config:
 # Optional filters to limit the discovery process to a subset of available
 # resources.
 # The available filters are listed in the upstream documentation:
-# Services: https://docs.docker.com/engine/api/v1.40/#operation/ServiceList
-# Tasks: https://docs.docker.com/engine/api/v1.40/#operation/TaskList
-# Nodes: https://docs.docker.com/engine/api/v1.40/#operation/NodeList
+# https://docs.docker.com/engine/api/v1.40/#operation/ContainerList
 [ filters:
   [ - name: <string>
       values: <string>, [...] ]
@@ -736,7 +739,7 @@ oauth2:
   [ <oauth2> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]
 ```

@@ -863,7 +866,9 @@ role: <string>
 # Optional filters to limit the discovery process to a subset of available
 # resources.
 # The available filters are listed in the upstream documentation:
-# https://docs.docker.com/engine/api/v1.40/#operation/ContainerList
+# Services: https://docs.docker.com/engine/api/v1.40/#operation/ServiceList
+# Tasks: https://docs.docker.com/engine/api/v1.40/#operation/TaskList
+# Nodes: https://docs.docker.com/engine/api/v1.40/#operation/NodeList
 [ filters:
   [ - name: <string>
       values: <string>, [...] ]
@@ -899,7 +904,7 @@ oauth2:
   [ <oauth2> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]
 ```
@@ -1202,7 +1207,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]
 ```

 See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)
@@ -1406,7 +1411,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -1487,7 +1492,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -1649,7 +1654,7 @@ See below for the configuration options for Kubernetes discovery:
 [ api_server: <host> ]

 # The Kubernetes role of entities that should be discovered.
-# One of endpoints, service, pod, node, or ingress.
+# One of endpoints, endpointslice, service, pod, node, or ingress.
 role: <string>

 # Optional path to a kubeconfig file.
@@ -1686,7 +1691,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -1694,7 +1699,7 @@ tls_config:

 # Optional namespace discovery. If omitted, all namespaces are used.
 namespaces:
-  own_namespace: <bool>
+  own_namespace: <boolean>
   names:
     [ - <string> ]
@@ -1784,7 +1789,7 @@ oauth2:
   [ <oauth2> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]
 ```

 The [relabeling phase](#relabel_config) is the preferred and more powerful way
@@ -1900,7 +1905,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -1983,7 +1988,7 @@ oauth2:
   [ <oauth2> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration for connecting to marathon servers
 tls_config:
@@ -2193,7 +2198,7 @@ tls_config:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # Refresh interval to re-read the app instance list.
 [ refresh_interval: <duration> | default = 30s ]
@@ -2297,7 +2302,7 @@ tags_filter:
 [ refresh_interval: <duration> | default = 60s ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # Optional proxy URL.
 [ proxy_url: <string> ]
@@ -2370,7 +2375,7 @@ oauth2:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # TLS configuration.
 tls_config:
@@ -2552,7 +2557,7 @@ tls_config:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # List of Azure service discovery configurations.
 azure_sd_configs:
@@ -2655,6 +2660,28 @@ relabel_configs:
   [ - <relabel_config> ... ]
 ```

+### `<tracing_config>`
+
+`tracing_config` configures exporting traces from Prometheus to a tracing backend via the OTLP protocol. Tracing is currently an **experimental** feature and could change in the future.
+
+```yaml
+# Client used to export the traces. Options are 'http' or 'grpc'.
+[ client_type: <string> | default = grpc ]
+
+# Endpoint to send the traces to.
+[ endpoint: <string> ]
+
+# Sets the probability a given trace will be sampled. Must be a float from 0 through 1.
+[ sampling_fraction: <float> | default = 0 ]
+
+# If disabled, the client will use a secure connection.
+[ insecure: <boolean> | default = true ]
+
+# TLS configuration.
+tls_config:
+  [ <tls_config> ]
+```
+
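For reference, a working configuration under this new section can be as small as the following sketch (the endpoint mirrors the `conf.good.yml` test fixture added in this commit; any OTLP-capable collector address works):

```yaml
tracing:
  endpoint: "localhost:4317"  # OTLP gRPC receiver, e.g. an OpenTelemetry Collector
  client_type: "grpc"         # the default; 'http' is also accepted
  sampling_fraction: 1        # export every trace; the default of 0 samples none
```

Leaving `endpoint` unset fails validation with "tracing endpoint must be set", as the new `tracing.bad.yml` test case exercises.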
 ### `<remote_write>`

 `write_relabel_configs` is relabeling applied to samples before sending them
@@ -2739,7 +2766,7 @@ tls_config:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]

 # Configures the queue used to write to remote storage.
 queue_config:
@@ -2840,7 +2867,10 @@ tls_config:
 [ proxy_url: <string> ]

 # Configure whether HTTP requests follow HTTP 3xx redirects.
-[ follow_redirects: <bool> | default = true ]
+[ follow_redirects: <boolean> | default = true ]
+
+# Whether to use the external labels as selectors for the remote read endpoint.
+[ filter_external_labels: <boolean> | default = true ]
 ```

 There is a list of

View file

@ -84,6 +84,7 @@ versions.
| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. | | args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. |
| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. | | tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. |
| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. | | safeHtml | string | string | Marks string as HTML not requiring auto-escaping. |
| pathPrefix | _none_ | string | The external URL [path](https://pkg.go.dev/net/url#URL) for use in console templates. |
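For instance, a console template can build prefix-aware links such as `<a href="{{ pathPrefix }}/graph">Graph</a>`, so they keep working when Prometheus is served under a route prefix.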
## Template type differences ## Template type differences

View file

@ -18,6 +18,7 @@ They may be enabled by default in future versions.
Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file) Replace `${var}` or `$var` in the [`external_labels`](configuration/configuration.md#configuration-file)
values according to the values of the current environment variables. References values according to the values of the current environment variables. References
to undefined variables are replaced by the empty string. to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.
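For example, an `external_labels` value of `us-$$1` expands to the literal `us-$1`, with no environment variable substitution applied.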
## Remote Write Receiver ## Remote Write Receiver

View file

@ -197,7 +197,7 @@ bucket. Otherwise, the upper bound of the lowest bucket is returned
for quantiles located in the lowest bucket. for quantiles located in the lowest bucket.
If `b` has 0 observations, `NaN` is returned. If `b` contains fewer than two buckets, If `b` has 0 observations, `NaN` is returned. If `b` contains fewer than two buckets,
`NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
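For example, `histogram_quantile(NaN, rate(http_request_duration_seconds_bucket[5m]))` yields `NaN` for every resulting series.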
## `holt_winters()` ## `holt_winters()`

View file

@ -241,7 +241,7 @@ vector. `by` and `without` are only used to bucket the input vector.
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among `quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median, aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. `quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
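For example, `quantile(NaN, http_requests_total)` evaluates to `NaN` for each output series.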
Example: Example:

View file

@ -0,0 +1,21 @@
# Copyright 2022 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include ../../../Makefile.common
build:
@echo ">> building example_write_adapter"
@$(GO) build -o example_write_adapter/example_write_adapter ./example_write_adapter
@echo ">> building remote_storage_adapter"
@$(GO) build -o remote_storage_adapter/remote_storage_adapter ./remote_storage_adapter

View file

@ -0,0 +1,53 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
go 1.17
require (
github.com/go-kit/log v0.2.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.5
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/common v0.32.1
github.com/prometheus/prometheus v1.8.2-0.20220202104425-d819219dd438
github.com/stretchr/testify v1.7.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6
)
require (
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.42.31 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.26.1 // indirect
go.opentelemetry.io/otel v1.2.0 // indirect
go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect
go.opentelemetry.io/otel/metric v0.24.0 // indirect
go.opentelemetry.io/otel/trace v1.2.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

File diff suppressed because it is too large

30
go.mod
View file

@ -9,9 +9,10 @@ require (
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
github.com/aws/aws-sdk-go v1.42.31 github.com/armon/go-metrics v0.3.3 // indirect
github.com/aws/aws-sdk-go v1.42.44
github.com/cespare/xxhash/v2 v2.1.2 github.com/cespare/xxhash/v2 v2.1.2
github.com/containerd/containerd v1.5.7 // indirect github.com/containerd/containerd v1.5.9 // indirect
github.com/dennwc/varint v1.0.0 github.com/dennwc/varint v1.0.0
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
github.com/digitalocean/godo v1.73.0 github.com/digitalocean/godo v1.73.0
@ -29,24 +30,27 @@ require (
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd github.com/google/pprof v0.0.0-20211214055906-6f57359322fd
github.com/gophercloud/gophercloud v0.24.0 github.com/gophercloud/gophercloud v0.24.0
github.com/gorilla/mux v1.8.0 // indirect
github.com/grafana/regexp v0.0.0-20220202152315-e74e38789280
github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.12.0 github.com/hashicorp/consul/api v1.12.0
github.com/hashicorp/go-hclog v0.12.2 // indirect
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hetznercloud/hcloud-go v1.33.1 github.com/hetznercloud/hcloud-go v1.33.1
github.com/influxdata/influxdb v1.9.5
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/linode/linodego v1.2.1 github.com/linode/linodego v1.3.0
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/miekg/dns v1.1.45 github.com/miekg/dns v1.1.45
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/oklog/run v1.1.0 github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/prometheus/alertmanager v0.23.0 github.com/prometheus/alertmanager v0.23.0
github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.32.1 github.com/prometheus/common v0.32.1
github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/common/sigv4 v0.1.0
@ -55,18 +59,24 @@ require (
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/uber/jaeger-client-go v2.30.0+incompatible go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.26.1
github.com/uber/jaeger-lib v2.4.1+incompatible go.opentelemetry.io/otel v1.2.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0
go.opentelemetry.io/otel/sdk v1.2.0
go.opentelemetry.io/otel/trace v1.2.0
go.uber.org/atomic v1.9.0 go.uber.org/atomic v1.9.0
go.uber.org/goleak v1.1.12 go.uber.org/goleak v1.1.12
golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be
google.golang.org/api v0.64.0 google.golang.org/api v0.64.0
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb
google.golang.org/grpc v1.42.0
google.golang.org/protobuf v1.27.1 google.golang.org/protobuf v1.27.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0

329
go.sum

File diff suppressed because it is too large

View file

@ -14,9 +14,10 @@
package labels package labels
import ( import (
"regexp"
"regexp/syntax"
"strings" "strings"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
) )
type FastRegexMatcher struct { type FastRegexMatcher struct {

View file

@ -14,9 +14,10 @@
package labels package labels
import ( import (
"regexp/syntax" "strings"
"testing" "testing"
"github.com/grafana/regexp/syntax"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -96,3 +97,42 @@ func TestOptimizeConcatRegex(t *testing.T) {
require.Equal(t, c.contains, contains) require.Equal(t, c.contains, contains)
} }
} }
func BenchmarkFastRegexMatcher(b *testing.B) {
var (
x = strings.Repeat("x", 50)
y = "foo" + x
z = x + "foo"
)
regexes := []string{
"foo",
"^foo",
"(foo|bar)",
"foo.*",
".*foo",
"^.*foo$",
"^.+foo$",
".*",
".+",
"foo.+",
".+foo",
".*foo.*",
"(?i:foo)",
"(prometheus|api_prom)_api_v1_.+",
"((fo(bar))|.+foo)",
}
for _, r := range regexes {
r := r
b.Run(r, func(b *testing.B) {
m, err := NewFastRegexMatcher(r)
require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = m.MatchString(x)
_ = m.MatchString(y)
_ = m.MatchString(z)
}
})
}
}

View file

@ -16,9 +16,9 @@ package relabel
import ( import (
"crypto/md5" "crypto/md5"
"fmt" "fmt"
"regexp"
"strings" "strings"
"github.com/grafana/regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"

View file

@ -61,12 +61,19 @@ type Parser interface {
} }
// New returns a new parser of the byte slice. // New returns a new parser of the byte slice.
func New(b []byte, contentType string) Parser { //
// This function always returns a valid parser, but might additionally
// return an error if the content type cannot be parsed.
func New(b []byte, contentType string) (Parser, error) {
if contentType == "" {
return NewPromParser(b), nil
}
mediaType, _, err := mime.ParseMediaType(contentType) mediaType, _, err := mime.ParseMediaType(contentType)
if err == nil && mediaType == "application/openmetrics-text" { if err == nil && mediaType == "application/openmetrics-text" {
return NewOpenMetricsParser(b) return NewOpenMetricsParser(b), nil
} }
return NewPromParser(b) return NewPromParser(b), err
} }
// Entry represents the type of a parsed entry. // Entry represents the type of a parsed entry.
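The contract change above is easy to misread, so here is a minimal sketch of the new behavior (the payload and the error handling are illustrative, not taken from the repository): even when the content type is invalid and an error is returned, the returned parser is still valid and falls back to the Prometheus text format.

```go
package main

import (
	"fmt"
	"io"
	"log"

	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	payload := []byte("some_metric_total 42\n")
	// "invalid/" cannot be parsed as a media type; New reports the error but
	// still returns a usable parser (the Prometheus text-format fallback).
	p, err := textparse.New(payload, "invalid/")
	if err != nil {
		log.Printf("invalid content type, falling back to Prometheus parser: %v", err)
	}
	for {
		entry, err := p.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		if entry == textparse.EntrySeries {
			series, _, value := p.Series()
			fmt.Printf("%s => %v\n", series, value)
		}
	}
}
```

This is the same log-and-continue fallback pattern the scrape loop adopts further below.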

View file

@ -0,0 +1,104 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestNewParser(t *testing.T) {
t.Parallel()
requirePromParser := func(t *testing.T, p Parser) {
require.NotNil(t, p)
_, ok := p.(*PromParser)
require.True(t, ok)
}
requireOpenMetricsParser := func(t *testing.T, p Parser) {
require.NotNil(t, p)
_, ok := p.(*OpenMetricsParser)
require.True(t, ok)
}
for name, tt := range map[string]*struct {
contentType string
validateParser func(*testing.T, Parser)
err string
}{
"empty-string": {
validateParser: requirePromParser,
},
"invalid-content-type-1": {
contentType: "invalid/",
validateParser: requirePromParser,
err: "expected token after slash",
},
"invalid-content-type-2": {
contentType: "invalid/invalid/invalid",
validateParser: requirePromParser,
err: "unexpected content after media subtype",
},
"invalid-content-type-3": {
contentType: "/",
validateParser: requirePromParser,
err: "no media type",
},
"invalid-content-type-4": {
contentType: "application/openmetrics-text; charset=UTF-8; charset=utf-8",
validateParser: requirePromParser,
err: "duplicate parameter name",
},
"openmetrics": {
contentType: "application/openmetrics-text",
validateParser: requireOpenMetricsParser,
},
"openmetrics-with-charset": {
contentType: "application/openmetrics-text; charset=utf-8",
validateParser: requireOpenMetricsParser,
},
"openmetrics-with-charset-and-version": {
contentType: "application/openmetrics-text; version=1.0.0; charset=utf-8",
validateParser: requireOpenMetricsParser,
},
"plain-text": {
contentType: "text/plain",
validateParser: requirePromParser,
},
"plain-text-with-version": {
contentType: "text/plain; version=0.0.4",
validateParser: requirePromParser,
},
"some-other-valid-content-type": {
contentType: "text/html",
validateParser: requirePromParser,
},
} {
t.Run(name, func(t *testing.T) {
tt := tt // Copy to local variable before going parallel.
t.Parallel()
p, err := New([]byte{}, tt.contentType)
tt.validateParser(t, p)
if tt.err == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
}
})
}
}

10
prompb/buf.lock Normal file
View file

@ -0,0 +1,10 @@
# Generated by buf. DO NOT EDIT.
version: v1
deps:
- remote: buf.build
owner: gogo
repository: protobuf
branch: main
commit: 4df00b267f944190a229ce3695781e99
digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw=
create_time: 2021-08-10T00:14:28.345069Z

18
prompb/buf.yaml Normal file
View file

@ -0,0 +1,18 @@
version: v1
name: buf.build/prometheus/prometheus
lint:
ignore_only:
ENUM_VALUE_PREFIX:
- remote.proto
- types.proto
ENUM_ZERO_VALUE_SUFFIX:
- remote.proto
- types.proto
PACKAGE_DIRECTORY_MATCH:
- remote.proto
- types.proto
PACKAGE_VERSION_SUFFIX:
- remote.proto
- types.proto
deps:
- buf.build/gogo/protobuf

View file

@ -20,7 +20,6 @@ import (
"fmt" "fmt"
"math" "math"
"reflect" "reflect"
"regexp"
"runtime" "runtime"
"sort" "sort"
"strconv" "strconv"
@ -29,11 +28,13 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go" "github.com/grafana/regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/uber/jaeger-client-go" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
@ -178,8 +179,8 @@ func (q *query) Close() {
// Exec implements the Query interface. // Exec implements the Query interface.
func (q *query) Exec(ctx context.Context) *Result { func (q *query) Exec(ctx context.Context) *Result {
if span := opentracing.SpanFromContext(ctx); span != nil { if span := trace.SpanFromContext(ctx); span != nil {
span.SetTag(queryTag, q.stmt.String()) span.SetAttributes(attribute.String(queryTag, q.stmt.String()))
} }
// Exec query. // Exec query.
@ -207,13 +208,32 @@ func contextErr(err error, env string) error {
} }
} }
// QueryTracker provides access to two features:
//
// 1) Tracking of active queries. If the PromQL engine crashes while executing a query, that query will still be present
// in the tracker on restart, and hence gets logged. After the logging on restart, the tracker is emptied.
//
// 2) Enforcement of the maximum number of concurrent queries.
type QueryTracker interface {
// GetMaxConcurrent returns the maximum number of concurrent queries allowed by this tracker.
GetMaxConcurrent() int
// Insert inserts a query into the query tracker. This call must block if the maximum number of queries is already running.
// If Insert doesn't return an error, the returned integer value should be used in the subsequent Delete call.
// Insert should return an error if the context is finished before the query can proceed; the integer value returned in this case should be ignored by the caller.
Insert(ctx context.Context, query string) (int, error)
// Delete removes a query from the activity tracker. insertIndex is the value returned by the Insert call.
Delete(insertIndex int)
}
// EngineOpts contains configuration options used when creating a new Engine. // EngineOpts contains configuration options used when creating a new Engine.
type EngineOpts struct { type EngineOpts struct {
Logger log.Logger Logger log.Logger
Reg prometheus.Registerer Reg prometheus.Registerer
MaxSamples int MaxSamples int
Timeout time.Duration Timeout time.Duration
ActiveQueryTracker *ActiveQueryTracker ActiveQueryTracker QueryTracker
// LookbackDelta determines the time since the last sample after which a time // LookbackDelta determines the time since the last sample after which a time
// series is considered stale. // series is considered stale.
LookbackDelta time.Duration LookbackDelta time.Duration
@ -243,7 +263,7 @@ type Engine struct {
metrics *engineMetrics metrics *engineMetrics
timeout time.Duration timeout time.Duration
maxSamplesPerQuery int maxSamplesPerQuery int
activeQueryTracker *ActiveQueryTracker activeQueryTracker QueryTracker
queryLogger QueryLogger queryLogger QueryLogger
queryLoggerLock sync.RWMutex queryLoggerLock sync.RWMutex
lookbackDelta time.Duration lookbackDelta time.Duration
@ -505,10 +525,8 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag
f = append(f, "error", err) f = append(f, "error", err)
} }
f = append(f, "stats", stats.NewQueryStats(q.Stats())) f = append(f, "stats", stats.NewQueryStats(q.Stats()))
if span := opentracing.SpanFromContext(ctx); span != nil { if span := trace.SpanFromContext(ctx); span != nil {
if spanCtx, ok := span.Context().(jaeger.SpanContext); ok { f = append(f, "spanID", span.SpanContext().SpanID())
f = append(f, "spanID", spanCtx.SpanID())
}
} }
if origin := ctx.Value(QueryOrigin{}); origin != nil { if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]interface{}) { for k, v := range origin.(map[string]interface{}) {
@ -1171,8 +1189,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
// Create a new span to help investigate inner evaluation performances. // Create a new span to help investigate inner evaluation performances.
span, _ := opentracing.StartSpanFromContext(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
defer span.Finish() ev.ctx = ctxWithSpan
defer span.End()
switch e := expr.(type) { switch e := expr.(type) {
case *parser.AggregateExpr: case *parser.AggregateExpr:
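To make the new `QueryTracker` contract concrete, here is a minimal sketch of an implementation that only enforces the concurrency limit (all names are hypothetical, and the crash-logging half of the contract is deliberately left out):

```go
package main

import (
	"context"
	"fmt"
)

// semTracker is a hypothetical QueryTracker that enforces the maximum number
// of concurrent queries with a buffered channel and does no crash logging.
type semTracker struct {
	max  int
	sema chan struct{}
}

func newSemTracker(max int) *semTracker {
	return &semTracker{max: max, sema: make(chan struct{}, max)}
}

// GetMaxConcurrent returns the configured concurrency limit.
func (t *semTracker) GetMaxConcurrent() int { return t.max }

// Insert blocks until a slot is free or the context is done.
func (t *semTracker) Insert(ctx context.Context, query string) (int, error) {
	select {
	case t.sema <- struct{}{}:
		return 0, nil // The index is unused by this sketch.
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

// Delete releases the slot acquired by the corresponding Insert.
func (t *semTracker) Delete(insertIndex int) { <-t.sema }

func main() {
	t := newSemTracker(2)
	idx, err := t.Insert(context.Background(), "up")
	fmt.Println(idx, err) // 0 <nil>
	t.Delete(idx)
}
```

A buffered channel gives both the blocking Insert and the context cancellation for free; the built-in tracker additionally persists active queries to an mmapped log file (see the `query_logger.go` change below).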

View file

@ -15,12 +15,12 @@ package promql
import ( import (
"math" "math"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/grafana/regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -791,7 +791,7 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo
func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
q := vals[0].(Vector)[0].V q := vals[0].(Vector)[0].V
inVec := vals[1].(Vector) inVec := vals[1].(Vector)
sigf := signatureFunc(false, enh.lblBuf, excludedLabels...) sigf := signatureFunc(false, enh.lblBuf, labels.BucketLabel)
if enh.signatureToMetricWithBuckets == nil { if enh.signatureToMetricWithBuckets == nil {
enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{} enh.signatureToMetricWithBuckets = map[string]*metricWithBuckets{}
@ -810,11 +810,14 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
continue continue
} }
l := sigf(el.Metric) l := sigf(el.Metric)
// Add the metric name (which is always removed) to the signature to prevent combining multiple histograms
// with the same label set. See https://github.com/prometheus/prometheus/issues/9910
l = l + el.Metric.Get(model.MetricNameLabel)
mb, ok := enh.signatureToMetricWithBuckets[l] mb, ok := enh.signatureToMetricWithBuckets[l]
if !ok { if !ok {
el.Metric = labels.NewBuilder(el.Metric). el.Metric = labels.NewBuilder(el.Metric).
Del(labels.BucketLabel, labels.MetricName). Del(excludedLabels...).
Labels() Labels()
mb = &metricWithBuckets{el.Metric, nil} mb = &metricWithBuckets{el.Metric, nil}

View file

@ -57,7 +57,13 @@ const (
) )
func fuzzParseMetricWithContentType(in []byte, contentType string) int { func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p := textparse.New(in, contentType) p, warning := textparse.New(in, contentType)
if warning != nil {
// An invalid content type is being passed, which should not happen
// in this context.
panic(warning)
}
var err error var err error
for { for {
_, err = p.Next() _, err = p.Next()

39
promql/fuzz_test.go Normal file
View file

@ -0,0 +1,39 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Only build when go-fuzz is in use
//go:build gofuzz
// +build gofuzz
package promql
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) {
defer func() {
if p := recover(); p == nil {
t.Error("invalid content type should panic")
} else {
err, ok := p.(error)
require.True(t, ok)
require.Contains(t, err.Error(), "duplicate parameter name")
}
}()
const invalidContentType = "application/openmetrics-text; charset=UTF-8; charset=utf-8"
fuzzParseMetricWithContentType([]byte{}, invalidContentType)
}

View file

@ -67,10 +67,15 @@ type metricWithBuckets struct {
// //
// If the highest bucket is not +Inf, NaN is returned. // If the highest bucket is not +Inf, NaN is returned.
// //
// If q==NaN, NaN is returned.
//
// If q<0, -Inf is returned. // If q<0, -Inf is returned.
// //
// If q>1, +Inf is returned. // If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 { func bucketQuantile(q float64, buckets buckets) float64 {
if math.IsNaN(q) {
return math.NaN()
}
if q < 0 { if q < 0 {
return math.Inf(-1) return math.Inf(-1)
} }
@ -182,10 +187,11 @@ func ensureMonotonic(buckets buckets) {
// //
// The Vector will be sorted. // The Vector will be sorted.
// If 'values' has zero elements, NaN is returned. // If 'values' has zero elements, NaN is returned.
// If q==NaN, NaN is returned.
// If q<0, -Inf is returned. // If q<0, -Inf is returned.
// If q>1, +Inf is returned. // If q>1, +Inf is returned.
func quantile(q float64, values vectorByValueHeap) float64 { func quantile(q float64, values vectorByValueHeap) float64 {
if len(values) == 0 { if len(values) == 0 || math.IsNaN(q) {
return math.NaN() return math.NaN()
} }
if q < 0 { if q < 0 {

View file

@ -83,7 +83,11 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) absPath, pathErr := filepath.Abs(filename)
if pathErr != nil {
absPath = filename
}
level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err)
return nil, err return nil, err
} }

View file

@ -17,9 +17,9 @@ import (
"context" "context"
"io/ioutil" "io/ioutil"
"os" "os"
"regexp"
"testing" "testing"
"github.com/grafana/regexp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View file

@ -18,11 +18,11 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math" "math"
"regexp"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/grafana/regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"

View file

@ -399,6 +399,11 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
{test="three samples"} 1.6 {test="three samples"} 1.6
{test="uneven samples"} 2.8 {test="uneven samples"} 2.8
eval instant at 1m quantile without(point)(NaN, data)
{test="two samples"} NaN
{test="three samples"} NaN
{test="uneven samples"} NaN
# Tests for group. # Tests for group.
clear clear

View file

@ -60,6 +60,11 @@ eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf {start="positive"} +Inf
{start="negative"} +Inf {start="negative"} +Inf
# Quantile invalid.
eval instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN
{start="negative"} NaN
# Quantile value in lowest bucket, which is positive. # Quantile value in lowest bucket, which is positive.
eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"}) eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
{start="positive"} 0 {start="positive"} 0
@ -219,3 +224,12 @@ load 5m
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
{instance="ins1", job="job1"} NaN {instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
# https://github.com/prometheus/prometheus/issues/9910
load 5m
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration.*"})

View file

@ -24,10 +24,11 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/model/rulefmt"
@ -584,10 +585,10 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
} }
func(i int, rule Rule) { func(i int, rule Rule) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "rule") ctx, sp := otel.Tracer("").Start(ctx, "rule")
sp.SetTag("name", rule.Name()) sp.SetAttributes(attribute.String("name", rule.Name()))
defer func(t time.Time) { defer func(t time.Time) {
sp.Finish() sp.End()
since := time.Since(t) since := time.Since(t)
g.metrics.EvalDuration.Observe(since.Seconds()) g.metrics.EvalDuration.Observe(since.Seconds())

View file

@ -748,7 +748,7 @@ type targetScraper struct {
var errBodySizeLimit = errors.New("body size limit exceeded") var errBodySizeLimit = errors.New("body size limit exceeded")
const acceptHeader = `application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` const acceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
@ -1418,8 +1418,16 @@ type appendErrors struct {
} }
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
p, err := textparse.New(b, contentType)
if err != nil {
level.Debug(sl.l).Log(
"msg", "Invalid content type on scrape, using prometheus parser as fallback.",
"content_type", contentType,
"err", err,
)
}
var ( var (
p = textparse.New(b, contentType)
defTime = timestamp.FromTime(ts) defTime = timestamp.FromTime(ts)
appErrs = appendErrors{} appErrs = appendErrors{}
sampleLimitErr error sampleLimitErr error

View file

@ -1479,7 +1479,8 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1) fakeRef := storage.SeriesRef(1)
expValue := float64(1) expValue := float64(1)
metric := `metric{n="1"} 1` metric := `metric{n="1"} 1`
p := textparse.New([]byte(metric), "") p, warning := textparse.New([]byte(metric), "")
require.NoError(t, warning)
var lset labels.Labels var lset labels.Labels
p.Next() p.Next()

View file

@ -27,14 +27,15 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/opentracing-contrib/go-stdlib/nethttp"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/sigv4" "github.com/prometheus/common/sigv4"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
) )
@ -119,9 +120,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
if len(conf.Headers) > 0 { if len(conf.Headers) > 0 {
t = newInjectHeadersRoundTripper(conf.Headers, t) t = newInjectHeadersRoundTripper(conf.Headers, t)
} }
httpClient.Transport = &nethttp.Transport{ httpClient.Transport = otelhttp.NewTransport(t)
RoundTripper: t,
}
return &Client{ return &Client{
remoteName: name, remoteName: name,
@ -153,9 +152,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
t = newInjectHeadersRoundTripper(conf.Headers, t) t = newInjectHeadersRoundTripper(conf.Headers, t)
} }
httpClient.Transport = &nethttp.Transport{ httpClient.Transport = otelhttp.NewTransport(t)
RoundTripper: t,
}
return &Client{ return &Client{
remoteName: name, remoteName: name,
@ -206,20 +203,10 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
ctx, cancel := context.WithTimeout(ctx, c.timeout) ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel() defer cancel()
httpReq = httpReq.WithContext(ctx) ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient))
defer span.End()
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
var ht *nethttp.Tracer
httpReq, ht = nethttp.TraceRequest(
parentSpan.Tracer(),
httpReq,
nethttp.OperationName("Remote Store"),
nethttp.ClientTrace(false),
)
defer ht.Finish()
}
httpResp, err := c.Client.Do(httpReq)
if err != nil { if err != nil {
// Errors from Client.Do are from (for example) network errors, so are // Errors from Client.Do are from (for example) network errors, so are
// recoverable. // recoverable.
@ -304,21 +291,11 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
ctx, cancel := context.WithTimeout(ctx, c.timeout) ctx, cancel := context.WithTimeout(ctx, c.timeout)
defer cancel() defer cancel()
httpReq = httpReq.WithContext(ctx) ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient))
defer span.End()
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
var ht *nethttp.Tracer
httpReq, ht = nethttp.TraceRequest(
parentSpan.Tracer(),
httpReq,
nethttp.OperationName("Remote Read"),
nethttp.ClientTrace(false),
)
defer ht.Finish()
}
start := time.Now() start := time.Now()
httpResp, err := c.Client.Do(httpReq) httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error sending request") return nil, errors.Wrap(err, "error sending request")
} }

View file

@ -25,10 +25,10 @@ import (
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.uber.org/atomic" "go.uber.org/atomic"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
@ -472,21 +472,22 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p
metadataCount := len(metadata) metadataCount := len(metadata)
attemptStore := func(try int) error { attemptStore := func(try int) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Metadata Send Batch") ctx, span := otel.Tracer("").Start(ctx, "Remote Metadata Send Batch")
defer span.Finish() defer span.End()
span.SetTag("metadata", metadataCount) span.SetAttributes(
span.SetTag("try", try) attribute.Int("metadata", metadataCount),
span.SetTag("remote_name", t.storeClient.Name()) attribute.Int("try", try),
span.SetTag("remote_url", t.storeClient.Endpoint()) attribute.String("remote_name", t.storeClient.Name()),
attribute.String("remote_url", t.storeClient.Endpoint()),
)
begin := time.Now() begin := time.Now()
err := t.storeClient.Store(ctx, req) err := t.storeClient.Store(ctx, req)
t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
if err != nil { if err != nil {
span.LogKV("error", err) span.RecordError(err)
ext.Error.Set(span, true)
return err return err
} }
@ -906,7 +907,8 @@ func (t *QueueManager) newShards() *shards {
} }
type shards struct { type shards struct {
mtx sync.RWMutex // With the WAL, this is never actually contended. mtx sync.RWMutex // With the WAL, this is never actually contended.
writeMtx sync.Mutex
qm *QueueManager qm *QueueManager
queues []*queue queues []*queue
@ -949,6 +951,8 @@ func (s *shards) start(n int) {
s.softShutdown = make(chan struct{}) s.softShutdown = make(chan struct{})
s.running.Store(int32(n)) s.running.Store(int32(n))
s.done = make(chan struct{}) s.done = make(chan struct{})
s.enqueuedSamples.Store(0)
s.enqueuedExemplars.Store(0)
s.samplesDroppedOnHardShutdown.Store(0) s.samplesDroppedOnHardShutdown.Store(0)
s.exemplarsDroppedOnHardShutdown.Store(0) s.exemplarsDroppedOnHardShutdown.Store(0)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
@ -996,6 +1000,8 @@ func (s *shards) stop() {
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool { func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
s.mtx.RLock() s.mtx.RLock()
defer s.mtx.RUnlock() defer s.mtx.RUnlock()
s.writeMtx.Lock()
defer s.writeMtx.Unlock()
select { select {
case <-s.softShutdown: case <-s.softShutdown:
@ -1024,14 +1030,18 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
} }
type queue struct { type queue struct {
// batchMtx covers sending to the batchQueue and batch operations other
// than appending a sample. It is mainly to make sure (*queue).Batch() and
// (*queue).FlushAndShutdown() are not called concurrently.
batchMtx sync.Mutex
batch []sampleOrExemplar batch []sampleOrExemplar
batchQueue chan []sampleOrExemplar batchQueue chan []sampleOrExemplar
// Since we know there are a limited number of batches out, using a stack // Since we know there are a limited number of batches out, using a stack
// is easy and safe so a sync.Pool is not necessary. // is easy and safe so a sync.Pool is not necessary.
// poolMtx covers adding and removing batches from the batchPool.
poolMtx sync.Mutex
batchPool [][]sampleOrExemplar batchPool [][]sampleOrExemplar
// This mutex covers adding and removing batches from the batchPool.
poolMux sync.Mutex
} }
type sampleOrExemplar struct { type sampleOrExemplar struct {
@ -1076,6 +1086,9 @@ func (q *queue) Chan() <-chan []sampleOrExemplar {
// Batch returns the current batch and allocates a new batch. Must not be // Batch returns the current batch and allocates a new batch. Must not be
// called concurrently with Append. // called concurrently with Append.
func (q *queue) Batch() []sampleOrExemplar { func (q *queue) Batch() []sampleOrExemplar {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
batch := q.batch batch := q.batch
q.batch = q.newBatch(cap(batch)) q.batch = q.newBatch(cap(batch))
return batch return batch
@ -1083,8 +1096,8 @@ func (q *queue) Batch() []sampleOrExemplar {
// ReturnForReuse adds the batch buffer back to the internal pool. // ReturnForReuse adds the batch buffer back to the internal pool.
func (q *queue) ReturnForReuse(batch []sampleOrExemplar) { func (q *queue) ReturnForReuse(batch []sampleOrExemplar) {
q.poolMux.Lock() q.poolMtx.Lock()
defer q.poolMux.Unlock() defer q.poolMtx.Unlock()
if len(q.batchPool) < cap(q.batchPool) { if len(q.batchPool) < cap(q.batchPool) {
q.batchPool = append(q.batchPool, batch[:0]) q.batchPool = append(q.batchPool, batch[:0])
} }
@ -1093,6 +1106,9 @@ func (q *queue) ReturnForReuse(batch []sampleOrExemplar) {
// FlushAndShutdown stops the queue and flushes any samples. No appends can be // FlushAndShutdown stops the queue and flushes any samples. No appends can be
// made after this is called. // made after this is called.
func (q *queue) FlushAndShutdown(done <-chan struct{}) { func (q *queue) FlushAndShutdown(done <-chan struct{}) {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
if len(q.batch) > 0 { if len(q.batch) > 0 {
select { select {
case q.batchQueue <- q.batch: case q.batchQueue <- q.batch:
@ -1106,8 +1122,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
} }
func (q *queue) newBatch(capacity int) []sampleOrExemplar { func (q *queue) newBatch(capacity int) []sampleOrExemplar {
q.poolMux.Lock() q.poolMtx.Lock()
defer q.poolMux.Unlock() defer q.poolMtx.Unlock()
batches := len(q.batchPool) batches := len(q.batchPool)
if batches > 0 { if batches > 0 {
batch := q.batchPool[batches-1] batch := q.batchPool[batches-1]
@ -1186,18 +1202,22 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
case <-timer.C: case <-timer.C:
// We need to take the lock when getting a batch to avoid // We need to take the write lock when getting a batch to avoid
// concurrent Appends. Generally this will only happen on low // concurrent Appends. Generally this will only happen on low
// traffic instances. // traffic instances or during resharding. We have to use writeMtx
s.mtx.Lock() // and not the batchMtx on a queue because we do not want to have
// First, we need to see if we can happen to get a batch from the queue if it filled while acquiring the lock. // to lock each queue for each sample, and cannot call
// queue.Batch() while an Append happens.
s.writeMtx.Lock()
// First, we need to see if we can happen to get a batch from the
// queue if it filled while acquiring the lock.
var batch []sampleOrExemplar var batch []sampleOrExemplar
select { select {
case batch = <-batchQueue: case batch = <-batchQueue:
default: default:
batch = queue.Batch() batch = queue.Batch()
} }
s.mtx.Unlock() s.writeMtx.Unlock()
if len(batch) > 0 { if len(batch) > 0 {
nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData) nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData)
n := nPendingSamples + nPendingExemplars n := nPendingSamples + nPendingExemplars
@ -1279,17 +1299,20 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
// without causing a memory leak, and it has the nice effect of not propagating any // without causing a memory leak, and it has the nice effect of not propagating any
// parameters for sendSamplesWithBackoff/3. // parameters for sendSamplesWithBackoff/3.
attemptStore := func(try int) error { attemptStore := func(try int) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Send Batch") ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
defer span.Finish() defer span.End()
span.SetAttributes(
attribute.Int("request_size", reqSize),
attribute.Int("samples", sampleCount),
attribute.Int("try", try),
attribute.String("remote_name", s.qm.storeClient.Name()),
attribute.String("remote_url", s.qm.storeClient.Endpoint()),
)
span.SetTag("samples", sampleCount)
if exemplarCount > 0 { if exemplarCount > 0 {
span.SetTag("exemplars", exemplarCount) span.SetAttributes(attribute.Int("exemplars", exemplarCount))
} }
span.SetTag("request_size", reqSize)
span.SetTag("try", try)
span.SetTag("remote_name", s.qm.storeClient.Name())
span.SetTag("remote_url", s.qm.storeClient.Endpoint())
begin := time.Now() begin := time.Now()
s.qm.metrics.samplesTotal.Add(float64(sampleCount)) s.qm.metrics.samplesTotal.Add(float64(sampleCount))
@ -1298,8 +1321,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
if err != nil { if err != nil {
span.LogKV("error", err) span.RecordError(err)
ext.Error.Set(span, true)
return err return err
} }
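As a rough illustration of the locking discipline described above (all types and names here are invented, not the actual Prometheus ones): appends and the timer-driven flush share one mutex, so a partial batch can never be stolen mid-append, which is the race the `writeMtx` change closes.

```go
package main

import (
	"fmt"
	"sync"
)

// batcher sketches the writeMtx pattern: append and the timer-driven flush
// both take writeMtx, so batch-stealing cannot race an in-flight append.
type batcher struct {
	writeMtx sync.Mutex
	batch    []int
	out      chan []int
}

func (b *batcher) append(v int) {
	b.writeMtx.Lock()
	defer b.writeMtx.Unlock()
	b.batch = append(b.batch, v)
	if len(b.batch) == cap(b.batch) {
		b.out <- b.batch // Full batch; hand it off.
		b.batch = make([]int, 0, cap(b.batch))
	}
}

// flushTick mirrors the <-timer.C branch above: under the lock, prefer a
// batch that filled up while acquiring the lock, else steal the partial one.
func (b *batcher) flushTick() []int {
	b.writeMtx.Lock()
	defer b.writeMtx.Unlock()
	select {
	case batch := <-b.out:
		return batch
	default:
		batch := b.batch
		b.batch = make([]int, 0, cap(batch))
		return batch
	}
}

func main() {
	b := &batcher{batch: make([]int, 0, 3), out: make(chan []int, 4)}
	for i := 0; i < 7; i++ {
		b.append(i)
	}
	fmt.Println(b.flushTick()) // [0 1 2]: the first full batch handed off.
}
```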

View file

@ -382,6 +382,44 @@ func TestReshardRaceWithStop(t *testing.T) {
<-exitCh <-exitCh
} }
func TestReshardPartialBatch(t *testing.T) {
samples, series := createTimeseries(1, 10)
c := NewTestBlockedWriteClient()
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
cfg.MaxShards = 1
batchSendDeadline := time.Millisecond
flushDeadline := 10 * time.Millisecond
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.StoreSeries(series, 0)
m.Start()
for i := 0; i < 100; i++ {
done := make(chan struct{})
go func() {
m.Append(samples)
time.Sleep(batchSendDeadline)
m.shards.stop()
m.shards.start(1)
done <- struct{}{}
}()
select {
case <-done:
case <-time.After(2 * time.Second):
t.Error("Deadlock between sending and stopping detected")
t.FailNow()
}
}
// We can only call stop if there was not a deadlock.
m.Stop()
}
func TestReleaseNoninternedString(t *testing.T) { func TestReleaseNoninternedString(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
@ -913,11 +951,7 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
dir, err := ioutil.TempDir("", "TestCalculateDesiredShards") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)

View file

@ -118,9 +118,13 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
return err return err
} }
externalLabels := conf.GlobalConfig.ExternalLabels
if !rrConf.FilterExternalLabels {
externalLabels = make(labels.Labels, 0)
}
queryables = append(queryables, NewSampleAndChunkQueryableClient( queryables = append(queryables, NewSampleAndChunkQueryableClient(
c, c,
conf.GlobalConfig.ExternalLabels, externalLabels,
labelsToEqualityMatchers(rrConf.RequiredMatchers), labelsToEqualityMatchers(rrConf.RequiredMatchers),
rrConf.ReadRecent, rrConf.ReadRecent,
s.localStartTimeCallback, s.localStartTimeCallback,

View file

@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
) )
func TestStorageLifecycle(t *testing.T) { func TestStorageLifecycle(t *testing.T) {
@ -80,3 +81,55 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
err := s.Close() err := s.Close()
require.NoError(t, err) require.NoError(t, err)
} }
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
ExternalLabels: labels.Labels{labels.Label{Name: "foo", Value: "bar"}},
},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
&config.DefaultRemoteReadConfig,
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Equal(t, 1, len(s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels))
err := s.Close()
require.NoError(t, err)
}
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
ExternalLabels: labels.Labels{labels.Label{Name: "foo", Value: "bar"}},
},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 0, len(s.queryables))
conf.RemoteReadConfigs = []*config.RemoteReadConfig{
&config.DefaultRemoteReadConfig,
}
conf.RemoteReadConfigs[0].FilterExternalLabels = false
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, 1, len(s.queryables))
require.Equal(t, 0, len(s.queryables[0].(*sampleAndChunkQueryableClient).externalLabels))
err := s.Close()
require.NoError(t, err)
}

View file

@ -21,13 +21,13 @@ import (
"math" "math"
"net" "net"
"net/url" "net/url"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
text_template "text/template" text_template "text/template"
"time" "time"
"github.com/grafana/regexp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"

View file

@ -0,0 +1,40 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package template
import (
"math"
"testing"
)
// Some test cases rely upon architecture-specific behaviors with respect
// to numerical conversions. The logic remains the same across architectures,
// but outputs can vary, so the cases are only run on amd64.
// See https://github.com/prometheus/prometheus/issues/10185 for more details.
func TestTemplateExpansionAMD64(t *testing.T) {
testTemplateExpansion(t, []scenario{
{
// HumanizeDuration - MaxInt64.
text: "{{ humanizeDuration . }}",
input: math.MaxInt64,
output: "-106751991167300d -15h -30m -8s",
},
{
// HumanizeDuration - MaxUint64.
text: "{{ humanizeDuration . }}",
input: uint(math.MaxUint64),
output: "-106751991167300d -15h -30m -8s",
},
})
}
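The architecture dependence mentioned in the comment comes from Go's float-to-integer conversions: for out-of-range values the result is implementation-defined, and amd64 yields `math.MinInt64`, which is what produces the negative duration strings expected above. A small sketch, assuming an amd64 machine:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// math.MaxUint64 is far outside the int64 range, so the conversion
	// result is implementation-defined; amd64 yields math.MinInt64.
	v := float64(uint64(math.MaxUint64))
	fmt.Println(int64(v))                  // -9223372036854775808 on amd64
	fmt.Println(int64(v) == math.MinInt64) // true on amd64
}
```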

View file

@ -27,16 +27,7 @@ import (
) )
func TestTemplateExpansion(t *testing.T) { func TestTemplateExpansion(t *testing.T) {
scenarios := []struct { testTemplateExpansion(t, []scenario{
text string
output string
input interface{}
options []string
queryResult promql.Vector
shouldFail bool
html bool
errorMsg string
}{
{ {
// No template. // No template.
text: "plain text", text: "plain text",
@ -353,14 +344,14 @@ func TestTemplateExpansion(t *testing.T) {
{ {
// HumanizeDuration - int. // HumanizeDuration - int.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}", text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []int{0, -1, 1, 1234567, math.MaxInt64}, input: []int{0, -1, 1, 1234567},
output: "0s:-1s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:", output: "0s:-1s:1s:14d 6h 56m 7s:",
}, },
{ {
// HumanizeDuration - uint. // HumanizeDuration - uint.
text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}", text: "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
input: []uint{0, 1, 1234567, math.MaxUint64}, input: []uint{0, 1, 1234567},
output: "0s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:", output: "0s:1s:14d 6h 56m 7s:",
}, },
{ {
// Humanize* Inf and NaN - float64. // Humanize* Inf and NaN - float64.
@ -489,8 +480,21 @@ func TestTemplateExpansion(t *testing.T) {
text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}", text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}",
output: "3720.01", output: "3720.01",
}, },
} })
}
type scenario struct {
text string
output string
input interface{}
options []string
queryResult promql.Vector
shouldFail bool
html bool
errorMsg string
}
func testTemplateExpansion(t *testing.T, scenarios []scenario) {
extURL, err := url.Parse("http://testhost:9090/path/prefix") extURL, err := url.Parse("http://testhost:9090/path/prefix")
if err != nil { if err != nil {
panic(err) panic(err)

22
tracing/testdata/ca.cer vendored Normal file
View file

@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4
MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH
DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ
BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq
t4kjBRWzqkZ5uJVkzBPERKEBoOI9pWcrqtMTBkMzHJY2Ep7GHTab10e9KC2IFQT6
FKP/jCYixaIVx3azEfajRJooD8r79FGoagWUfHdHyCFWJb/iLt8z8+S91kelSRMS
yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/
VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV
w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1
BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL
rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu
e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1
0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k
pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH
U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx
j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU
mM5lH/s=
-----END CERTIFICATE-----

210
tracing/tracing.go Normal file
View file

@ -0,0 +1,210 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"context"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/version"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/credentials"
"github.com/prometheus/prometheus/config"
)
const serviceName = "prometheus"
// Manager is capable of building, (re)installing and shutting down
// the tracer provider.
type Manager struct {
logger log.Logger
done chan struct{}
config config.TracingConfig
shutdownFunc func() error
}
// NewManager creates a new tracing manager.
func NewManager(logger log.Logger) *Manager {
return &Manager{
logger: logger,
done: make(chan struct{}),
}
}
// Run starts the tracing manager. It registers the global text map propagator and error handler.
// It is blocking.
func (m *Manager) Run() {
otel.SetTextMapPropagator(propagation.TraceContext{})
otel.SetErrorHandler(otelErrHandler(func(err error) {
level.Error(m.logger).Log("msg", "OpenTelemetry handler returned an error", "err", err)
}))
<-m.done
}
// ApplyConfig takes care of refreshing the tracing configuration by shutting down
// the current tracer provider (if any is registered) and installing a new one.
func (m *Manager) ApplyConfig(cfg *config.Config) error {
// Update only if a config change is detected. If TLS configuration is
// set, we have to restart the manager to make sure that new TLS
// certificates are picked up.
var blankTLSConfig config_util.TLSConfig
if m.config == cfg.TracingConfig && m.config.TLSConfig == blankTLSConfig {
return nil
}
if m.shutdownFunc != nil {
if err := m.shutdownFunc(); err != nil {
return errors.Wrap(err, "failed to shut down the tracer provider")
}
}
// If no endpoint is set, assume tracing should be disabled.
if cfg.TracingConfig.Endpoint == "" {
m.config = cfg.TracingConfig
m.shutdownFunc = nil
otel.SetTracerProvider(trace.NewNoopTracerProvider())
level.Info(m.logger).Log("msg", "Tracing provider uninstalled.")
return nil
}
tp, shutdownFunc, err := buildTracerProvider(context.Background(), cfg.TracingConfig)
if err != nil {
return errors.Wrap(err, "failed to install a new tracer provider")
}
m.shutdownFunc = shutdownFunc
m.config = cfg.TracingConfig
otel.SetTracerProvider(tp)
level.Info(m.logger).Log("msg", "Successfully installed a new tracer provider.")
return nil
}
// Stop gracefully shuts down the tracer provider and stops the tracing manager.
func (m *Manager) Stop() {
defer close(m.done)
if m.shutdownFunc == nil {
return
}
if err := m.shutdownFunc(); err != nil {
level.Error(m.logger).Log("msg", "failed to shut down the tracer provider", "err", err)
}
level.Info(m.logger).Log("msg", "Tracing manager stopped")
}
type otelErrHandler func(err error)
func (o otelErrHandler) Handle(err error) {
o(err)
}
// buildTracerProvider returns a new tracer provider ready for installation, together
// with a shutdown function.
func buildTracerProvider(ctx context.Context, tracingCfg config.TracingConfig) (trace.TracerProvider, func() error, error) {
client, err := getClient(tracingCfg)
if err != nil {
return nil, nil, err
}
exp, err := otlptrace.New(ctx, client)
if err != nil {
return nil, nil, err
}
// Create a resource describing the service and the runtime.
res, err := resource.New(
ctx,
resource.WithSchemaURL(semconv.SchemaURL),
resource.WithAttributes(
semconv.ServiceNameKey.String(serviceName),
semconv.ServiceVersionKey.String(version.Version),
),
resource.WithProcessRuntimeDescription(),
resource.WithTelemetrySDK(),
)
if err != nil {
return nil, nil, err
}
tp := tracesdk.NewTracerProvider(
tracesdk.WithBatcher(exp),
tracesdk.WithSampler(tracesdk.ParentBased(
tracesdk.TraceIDRatioBased(tracingCfg.SamplingFraction),
)),
tracesdk.WithResource(res),
)
return tp, func() error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
err := tp.Shutdown(ctx)
if err != nil {
return err
}
return nil
}, nil
}
// getClient returns an appropriate OTLP client (either gRPC or HTTP), based
// on the provided tracing configuration.
func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
var client otlptrace.Client
switch tracingCfg.ClientType {
case config.TracingClientGRPC:
opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure {
opts = append(opts, otlptracegrpc.WithInsecure())
}
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
client = otlptracegrpc.NewClient(opts...)
case config.TracingClientHTTP:
opts := []otlptracehttp.Option{otlptracehttp.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure {
opts = append(opts, otlptracehttp.WithInsecure())
}
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracehttp.WithTLSClientConfig(tlsConf))
client = otlptracehttp.NewClient(opts...)
}
return client, nil
}
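Putting the tracing package together, a minimal usage sketch; the endpoint is a placeholder and error handling is trimmed, but every identifier appears in the code above:

	logger := log.NewNopLogger()
	m := tracing.NewManager(logger)
	go m.Run() // registers the global propagator and error handler, blocks until Stop

	cfg := &config.Config{
		TracingConfig: config.TracingConfig{
			Endpoint:   "otel-collector:4317", // hypothetical OTLP endpoint
			ClientType: config.TracingClientGRPC,
		},
	}
	if err := m.ApplyConfig(cfg); err != nil {
		level.Error(logger).Log("msg", "applying tracing config failed", "err", err)
	}

	// ... on shutdown:
	m.Stop()

Applying the same config again is a no-op unless TLS is configured, as the tests below verify.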

tracing/tracing_test.go (new file, 127 lines)

@ -0,0 +1,127 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"testing"
"github.com/go-kit/log"
config_util "github.com/prometheus/common/config"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"github.com/prometheus/prometheus/config"
)
func TestInstallingNewTracerProvider(t *testing.T) {
tpBefore := otel.GetTracerProvider()
m := NewManager(log.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, tpBefore, otel.GetTracerProvider())
}
func TestReinstallingTracerProvider(t *testing.T) {
m := NewManager(log.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
tpFirstConfig := otel.GetTracerProvider()
// Trying to apply the same config should not reinstall provider.
require.NoError(t, m.ApplyConfig(&cfg))
require.Equal(t, tpFirstConfig, otel.GetTracerProvider())
cfg2 := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientHTTP,
},
}
require.NoError(t, m.ApplyConfig(&cfg2))
require.NotEqual(t, tpFirstConfig, otel.GetTracerProvider())
}
func TestReinstallingTracerProviderWithTLS(t *testing.T) {
m := NewManager(log.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
TLSConfig: config_util.TLSConfig{
CAFile: "testdata/ca.cer",
},
},
}
require.NoError(t, m.ApplyConfig(&cfg))
tpFirstConfig := otel.GetTracerProvider()
// Trying to apply the same config with TLS should reinstall provider.
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, tpFirstConfig, otel.GetTracerProvider())
}
func TestUninstallingTracerProvider(t *testing.T) {
m := NewManager(log.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
require.NotEqual(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider())
// Uninstall by passing empty config.
cfg2 := config.Config{
TracingConfig: config.TracingConfig{},
}
require.NoError(t, m.ApplyConfig(&cfg2))
// Make sure we get a no-op tracer provider after uninstallation.
require.Equal(t, trace.NewNoopTracerProvider(), otel.GetTracerProvider())
}
func TestTracerProviderShutdown(t *testing.T) {
m := NewManager(log.NewNopLogger())
cfg := config.Config{
TracingConfig: config.TracingConfig{
Endpoint: "localhost:1234",
ClientType: config.TracingClientGRPC,
},
}
require.NoError(t, m.ApplyConfig(&cfg))
m.Stop()
// Check if we closed the done channel.
_, ok := <-m.done
require.Equal(t, ok, false)
}


@ -93,6 +93,8 @@ type stripeSeries struct {
series []map[chunks.HeadSeriesRef]*memSeries series []map[chunks.HeadSeriesRef]*memSeries
hashes []seriesHashmap hashes []seriesHashmap
locks []stripeLock locks []stripeLock
gcMut sync.Mutex
} }
type stripeLock struct { type stripeLock struct {
@ -120,8 +122,14 @@ func newStripeSeries(stripeSize int) *stripeSeries {
// GC garbage collects old series that have not received a sample after mint // GC garbage collects old series that have not received a sample after mint
// and will fully delete them. // and will fully delete them.
func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} { func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} {
deleted := map[chunks.HeadSeriesRef]struct{}{} // NOTE(rfratto): GC will grab two locks, one for the hash and the other for
// series. It's not valid for any other function to grab both locks,
// otherwise a deadlock might occur when running GC in parallel with
// appending.
s.gcMut.Lock()
defer s.gcMut.Unlock()
deleted := map[chunks.HeadSeriesRef]struct{}{}
for hashLock := 0; hashLock < s.size; hashLock++ { for hashLock := 0; hashLock < s.size; hashLock++ {
s.locks[hashLock].Lock() s.locks[hashLock].Lock()
@ -179,14 +187,17 @@ func (s *stripeSeries) Set(hash uint64, series *memSeries) {
hashLock = hash & uint64(s.size-1) hashLock = hash & uint64(s.size-1)
refLock = uint64(series.ref) & uint64(s.size-1) refLock = uint64(series.ref) & uint64(s.size-1)
) )
s.locks[hashLock].Lock()
defer s.locks[hashLock].Unlock()
if hashLock != refLock { // We can't hold both locks at once, otherwise we might deadlock with a
s.locks[refLock].Lock() // simultaneous call to GC.
defer s.locks[refLock].Unlock() //
} // We update s.series first because GC expects anything in s.hashes to
// already exist in s.series.
s.hashes[hashLock].Set(hash, series) s.locks[refLock].Lock()
s.series[refLock][series.ref] = series s.series[refLock][series.ref] = series
s.locks[refLock].Unlock()
s.locks[hashLock].Lock()
s.hashes[hashLock].Set(hash, series)
s.locks[hashLock].Unlock()
} }
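The comment in Set is the heart of the fix: if Set held a hash lock and a ref lock at the same time while a concurrent GC acquires the same pair in a different order, each goroutine can end up holding one lock and waiting forever on the other. A minimal, self-contained illustration of that pattern (not Prometheus code):

	package main

	import "sync"

	// Two goroutines take the same two locks in opposite order; running this
	// can hang, which is exactly the circular wait the gcMut change prevents.
	func main() {
		var mu1, mu2 sync.Mutex
		var wg sync.WaitGroup
		wg.Add(2)
		go func() { // order: mu1, then mu2
			defer wg.Done()
			mu1.Lock()
			mu2.Lock()
			mu2.Unlock()
			mu1.Unlock()
		}()
		go func() { // order: mu2, then mu1
			defer wg.Done()
			mu2.Lock()
			mu1.Lock()
			mu1.Unlock()
			mu2.Unlock()
		}()
		wg.Wait()
	}

The test file below drives Set and GC concurrently to make sure the new locking cannot reproduce this.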

tsdb/agent/series_test.go (new file, 76 lines)

@ -0,0 +1,76 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"fmt"
"math"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
)
func TestNoDeadlock(t *testing.T) {
const numWorkers = 1000
var (
wg sync.WaitGroup
started = make(chan struct{})
stripeSeries = newStripeSeries(3)
)
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
defer wg.Done()
<-started
_ = stripeSeries.GC(math.MaxInt64)
}()
}
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func(i int) {
defer wg.Done()
<-started
series := &memSeries{
ref: chunks.HeadSeriesRef(i),
lset: labels.FromMap(map[string]string{
"id": fmt.Sprintf("%d", i),
}),
}
stripeSeries.Set(series.lset.Hash(), series)
}(i)
}
finished := make(chan struct{})
go func() {
wg.Wait()
close(finished)
}()
close(started)
select {
case <-finished:
return
case <-time.After(15 * time.Second):
require.FailNow(t, "deadlock detected")
}
}


@ -19,7 +19,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io/ioutil"
"math/rand" "math/rand"
"os" "os"
"path/filepath" "path/filepath"
@ -42,13 +41,9 @@ import (
// to 2. We had a migration in place resetting it to 1 but we should move immediately to // to 2. We had a migration in place resetting it to 1 but we should move immediately to
// version 3 next time to avoid confusion and issues. // version 3 next time to avoid confusion and issues.
func TestBlockMetaMustNeverBeVersion2(t *testing.T) { func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
dir, err := ioutil.TempDir("", "metaversion") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
_, err = writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{}) _, err := writeMetaFile(log.NewNopLogger(), dir, &BlockMeta{})
require.NoError(t, err) require.NoError(t, err)
meta, _, err := readMetaFile(dir) meta, _, err := readMetaFile(dir)
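This hunk establishes the pattern repeated through the rest of the diff: t.TempDir (Go 1.15+, also available as b.TempDir on benchmarks) creates a unique directory and registers its removal with the test's cleanup, so the error check and the deferred os.RemoveAll disappear. In sketch form (hypothetical test name):

	func TestSomething(t *testing.T) {
		dir := t.TempDir() // created now, removed automatically when the test finishes
		// ... use dir; no manual cleanup required ...
		_ = dir
	}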
@ -57,11 +52,7 @@ func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
} }
func TestSetCompactionFailed(t *testing.T) { func TestSetCompactionFailed(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1)) blockDir := createBlock(t, tmpdir, genSeries(1, 1, 0, 1))
b, err := OpenBlock(nil, blockDir, nil) b, err := OpenBlock(nil, blockDir, nil)
@ -78,11 +69,7 @@ func TestSetCompactionFailed(t *testing.T) {
} }
func TestCreateBlock(t *testing.T) { func TestCreateBlock(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil) b, err := OpenBlock(nil, createBlock(t, tmpdir, genSeries(1, 1, 0, 10)), nil)
if err == nil { if err == nil {
require.NoError(t, b.Close()) require.NoError(t, b.Close())
@ -173,11 +160,7 @@ func TestCorruptedChunk(t *testing.T) {
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_open_block_chunk_corrupted") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1}}) series := storage.NewListSeries(labels.FromStrings("a", "b"), []tsdbutil.Sample{sample{1, 1}})
blockDir := createBlock(t, tmpdir, []storage.Series{series}) blockDir := createBlock(t, tmpdir, []storage.Series{series})
@ -215,11 +198,7 @@ func TestCorruptedChunk(t *testing.T) {
} }
func TestLabelValuesWithMatchers(t *testing.T) { func TestLabelValuesWithMatchers(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_block_label_values_with_matchers") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
var seriesEntries []storage.Series var seriesEntries []storage.Series
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
@ -288,16 +267,13 @@ func TestLabelValuesWithMatchers(t *testing.T) {
// TestBlockSize ensures that the block size is calculated correctly. // TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize(t *testing.T) { func TestBlockSize(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_blockSize") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
var ( var (
blockInit *Block blockInit *Block
expSizeInit int64 expSizeInit int64
blockDirInit string blockDirInit string
err error
) )
// Create a block and compare the reported size vs actual disk size. // Create a block and compare the reported size vs actual disk size.
@ -376,11 +352,7 @@ func TestReadIndexFormatV1(t *testing.T) {
} }
func BenchmarkLabelValuesWithMatchers(b *testing.B) { func BenchmarkLabelValuesWithMatchers(b *testing.B) {
tmpdir, err := ioutil.TempDir("", "bench_block_label_values_with_matchers") tmpdir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(tmpdir))
}()
var seriesEntries []storage.Series var seriesEntries []storage.Series
metricCount := 1000000 metricCount := 1000000
@ -419,9 +391,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
} }
func TestLabelNamesWithMatchers(t *testing.T) { func TestLabelNamesWithMatchers(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_block_label_names_with_matchers") tmpdir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, os.RemoveAll(tmpdir)) })
var seriesEntries []storage.Series var seriesEntries []storage.Series
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {


@ -15,9 +15,7 @@ package tsdb
import ( import (
"context" "context"
"io/ioutil"
"math" "math"
"os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -30,9 +28,7 @@ import (
func TestBlockWriter(t *testing.T) { func TestBlockWriter(t *testing.T) {
ctx := context.Background() ctx := context.Background()
outputDir, err := ioutil.TempDir(os.TempDir(), "output") outputDir := t.TempDir()
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(outputDir)) }()
w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration) w, err := NewBlockWriter(log.NewNopLogger(), outputDir, DefaultBlockDuration)
require.NoError(t, err) require.NoError(t, err)


@ -34,7 +34,7 @@ type chunkWriteJob struct {
// chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion. // chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion.
// Chunks that shall be written get added to the queue, which is consumed asynchronously. // Chunks that shall be written get added to the queue, which is consumed asynchronously.
// Adding jobs to the job is non-blocking as long as the queue isn't full. // Adding jobs to the queue is non-blocking as long as the queue isn't full.
type chunkWriteQueue struct { type chunkWriteQueue struct {
jobs chan chunkWriteJob jobs chan chunkWriteJob


@ -61,7 +61,7 @@ const (
CRCSize = 4 CRCSize = 4
// MaxHeadChunkMetaSize is the max size of an mmapped chunk minus the chunk data. // MaxHeadChunkMetaSize is the max size of an mmapped chunk minus the chunk data.
// Max because the uvarint size can be smaller. // Max because the uvarint size can be smaller.
MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunksFormatVersionSize + MaxChunkLengthFieldSize + CRCSize MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + MaxChunkLengthFieldSize + CRCSize
// MinWriteBufferSize is the minimum write buffer size allowed. // MinWriteBufferSize is the minimum write buffer size allowed.
MinWriteBufferSize = 64 * 1024 // 64KB. MinWriteBufferSize = 64 * 1024 // 64KB.
// MaxWriteBufferSize is the maximum write buffer size allowed. // MaxWriteBufferSize is the maximum write buffer size allowed.
@ -113,7 +113,7 @@ func (f *chunkPos) getNextChunkRef(chk chunkenc.Chunk) (chkRef ChunkDiskMapperRe
chkLen := uint64(len(chk.Bytes())) chkLen := uint64(len(chk.Bytes()))
bytesToWrite := f.bytesToWriteForChunk(chkLen) bytesToWrite := f.bytesToWriteForChunk(chkLen)
if f.shouldCutNewFile(chkLen) { if f.shouldCutNewFile(bytesToWrite) {
f.toNewFile() f.toNewFile()
f.cutFile = false f.cutFile = false
cutFile = true cutFile = true
@ -144,14 +144,14 @@ func (f *chunkPos) initSeq(seq uint64) {
} }
// shouldCutNewFile returns whether a new file should be cut based on the file size. // shouldCutNewFile returns whether a new file should be cut based on the file size.
// The read or write lock on chunkPos must be held when calling this. // Not thread-safe; a lock must be held when calling this.
func (f *chunkPos) shouldCutNewFile(chunkSize uint64) bool { func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool {
if f.cutFile { if f.cutFile {
return true return true
} }
return f.offset == 0 || // First head chunk file. return f.offset == 0 || // First head chunk file.
f.offset+chunkSize+MaxHeadChunkMetaSize > MaxHeadChunkFileSize // Exceeds the max head chunk file size. f.offset+bytesToWrite > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
} }
// bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size, // bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,
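The behavioral point of this hunk: bytesToWrite already accounts for the per-chunk metadata, so the cut decision compares the exact number of bytes about to be written instead of approximating with the raw chunk length plus a worst-case metadata size. Assuming the usual constant values (8-byte series ref, two 8-byte timestamps, 1-byte encoding, up-to-5-byte uvarint length, 4-byte CRC), that worst case is 8 + 16 + 1 + 5 + 4 = 34 bytes. A sketch of the corrected check (hypothetical standalone helper, not the real method):

	// shouldCut mirrors the decision above: cut on the first chunk of a fresh
	// file, or when the full pending write (data plus metadata) would overflow.
	func shouldCut(offset, bytesToWrite, maxFileSize uint64) bool {
		return offset == 0 || offset+bytesToWrite > maxFileSize
	}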


@ -450,12 +450,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper { func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
if dir == "" { if dir == "" {
var err error dir = t.TempDir()
dir, err = ioutil.TempDir("", "data")
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(dir))
})
} }
hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, DefaultWriteQueueSize) hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, DefaultWriteQueueSize)


@ -16,7 +16,6 @@ package tsdb
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"math" "math"
"os" "os"
"path" "path"
@ -435,11 +434,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
}, nil, nil) }, nil, nil)
require.NoError(t, err) require.NoError(t, err)
tmpdir, err := ioutil.TempDir("", "test") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
require.Error(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{})) require.Error(t, compactor.write(tmpdir, &BlockMeta{}, erringBReader{}))
_, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix) _, err = os.Stat(filepath.Join(tmpdir, BlockMeta{}.ULID.String()) + tmpForCreationBlockDirSuffix)
@ -1049,11 +1044,7 @@ func BenchmarkCompaction(b *testing.B) {
for _, c := range cases { for _, c := range cases {
nBlocks := len(c.ranges) nBlocks := len(c.ranges)
b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) { b.Run(fmt.Sprintf("type=%s,blocks=%d,series=%d,samplesPerSeriesPerBlock=%d", c.compactionType, nBlocks, nSeries, c.ranges[0][1]-c.ranges[0][0]+1), func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_compaction") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
blockDirs := make([]string, 0, len(c.ranges)) blockDirs := make([]string, 0, len(c.ranges))
var blocks []*Block var blocks []*Block
for _, r := range c.ranges { for _, r := range c.ranges {
@ -1080,20 +1071,12 @@ func BenchmarkCompaction(b *testing.B) {
} }
func BenchmarkCompactionFromHead(b *testing.B) { func BenchmarkCompactionFromHead(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_compaction_from_head") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
totalSeries := 100000 totalSeries := 100000
for labelNames := 1; labelNames < totalSeries; labelNames *= 10 { for labelNames := 1; labelNames < totalSeries; labelNames *= 10 {
labelValues := totalSeries / labelNames labelValues := totalSeries / labelNames
b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) { b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir opts.ChunkDirRoot = chunkDir
@ -1175,11 +1158,7 @@ func TestDisableAutoCompactions(t *testing.T) {
// TestCancelCompactions ensures that when the db is closed // TestCancelCompactions ensures that when the db is closed
// any running compaction is cancelled to unblock closing the db. // any running compaction is cancelled to unblock closing the db.
func TestCancelCompactions(t *testing.T) { func TestCancelCompactions(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "testCancelCompaction") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
// Create some blocks to fall within the compaction range. // Create some blocks to fall within the compaction range.
createBlock(t, tmpdir, genSeries(1, 10000, 0, 1000)) createBlock(t, tmpdir, genSeries(1, 10000, 0, 1000))
@ -1188,7 +1167,7 @@ func TestCancelCompactions(t *testing.T) {
// Copy the db so we have an exact copy to compare compaction times. // Copy the db so we have an exact copy to compare compaction times.
tmpdirCopy := tmpdir + "Copy" tmpdirCopy := tmpdir + "Copy"
err = fileutil.CopyDirs(tmpdir, tmpdirCopy) err := fileutil.CopyDirs(tmpdir, tmpdirCopy)
require.NoError(t, err) require.NoError(t, err)
defer func() { defer func() {
require.NoError(t, os.RemoveAll(tmpdirCopy)) require.NoError(t, os.RemoveAll(tmpdirCopy))


@ -64,8 +64,8 @@ func TestMain(m *testing.M) {
} }
func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) { func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
tmpdir, err := ioutil.TempDir("", "test") tmpdir := t.TempDir()
require.NoError(t, err) var err error
if len(rngs) == 0 { if len(rngs) == 0 {
db, err = Open(tmpdir, nil, nil, opts, nil) db, err = Open(tmpdir, nil, nil, opts, nil)
@ -76,9 +76,6 @@ func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
require.NoError(t, err) require.NoError(t, err)
// Do not Close() the test database by default as it will deadlock on test failures. // Do not Close() the test database by default as it will deadlock on test failures.
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpdir))
})
return db return db
} }
@ -573,17 +570,12 @@ func TestDB_Snapshot(t *testing.T) {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
// create snapshot // create snapshot
snap, err := ioutil.TempDir("", "snap") snap := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(snap))
}()
require.NoError(t, db.Snapshot(snap, true)) require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close()) require.NoError(t, db.Close())
// reopen DB from snapshot // reopen DB from snapshot
db, err = Open(snap, nil, nil, nil, nil) db, err := Open(snap, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, db.Close()) }() defer func() { require.NoError(t, db.Close()) }()
@ -622,20 +614,16 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
} }
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
snap, err := ioutil.TempDir("", "snap") snap := t.TempDir()
require.NoError(t, err)
// Hackily introduce a "race" by having a lower max time than maxTime in the last chunk. // Hackily introduce a "race" by having a lower max time than maxTime in the last chunk.
db.head.maxTime.Sub(10) db.head.maxTime.Sub(10)
defer func() {
require.NoError(t, os.RemoveAll(snap))
}()
require.NoError(t, db.Snapshot(snap, true)) require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close()) require.NoError(t, db.Close())
// Reopen DB from snapshot. // Reopen DB from snapshot.
db, err = Open(snap, nil, nil, nil, nil) db, err := Open(snap, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, db.Close()) }() defer func() { require.NoError(t, db.Close()) }()
@ -696,12 +684,8 @@ Outer:
} }
// create snapshot // create snapshot
snap, err := ioutil.TempDir("", "snap") snap := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(snap))
}()
require.NoError(t, db.Snapshot(snap, true)) require.NoError(t, db.Snapshot(snap, true))
// reopen DB from snapshot // reopen DB from snapshot
@ -1124,17 +1108,12 @@ func TestTombstoneClean(t *testing.T) {
// Delete the ranges. // Delete the ranges.
// Create snapshot. // Create snapshot.
snap, err := ioutil.TempDir("", "snap") snap := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(snap))
}()
require.NoError(t, db.Snapshot(snap, true)) require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close()) require.NoError(t, db.Close())
// Reopen DB from snapshot. // Reopen DB from snapshot.
db, err = Open(snap, nil, nil, nil, nil) db, err := Open(snap, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
defer db.Close() defer db.Close()
@ -1213,17 +1192,12 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
intervals := tombstones.Intervals{{Mint: 0, Maxt: numSamples}} intervals := tombstones.Intervals{{Mint: 0, Maxt: numSamples}}
// Create snapshot. // Create snapshot.
snap, err := ioutil.TempDir("", "snap") snap := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(snap))
}()
require.NoError(t, db.Snapshot(snap, true)) require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close()) require.NoError(t, db.Close())
// Reopen DB from snapshot. // Reopen DB from snapshot.
db, err = Open(snap, nil, nil, nil, nil) db, err := Open(snap, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
defer db.Close() defer db.Close()
@ -1847,11 +1821,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
// - with blocks with WAL: same as above // - with blocks with WAL: same as above
func TestInitializeHeadTimestamp(t *testing.T) { func TestInitializeHeadTimestamp(t *testing.T) {
t.Run("clean", func(t *testing.T) { t.Run("clean", func(t *testing.T) {
dir, err := ioutil.TempDir("", "test_head_init") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
db, err := Open(dir, nil, nil, nil, nil) db, err := Open(dir, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
@ -1871,11 +1841,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(1000), db.head.MaxTime()) require.Equal(t, int64(1000), db.head.MaxTime())
}) })
t.Run("wal-only", func(t *testing.T) { t.Run("wal-only", func(t *testing.T) {
dir, err := ioutil.TempDir("", "test_head_init") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777)) require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wal.New(nil, nil, path.Join(dir, "wal"), false) w, err := wal.New(nil, nil, path.Join(dir, "wal"), false)
@ -1903,11 +1869,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(15000), db.head.MaxTime()) require.Equal(t, int64(15000), db.head.MaxTime())
}) })
t.Run("existing-block", func(t *testing.T) { t.Run("existing-block", func(t *testing.T) {
dir, err := ioutil.TempDir("", "test_head_init") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
createBlock(t, dir, genSeries(1, 1, 1000, 2000)) createBlock(t, dir, genSeries(1, 1, 1000, 2000))
@ -1919,11 +1881,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(2000), db.head.MaxTime()) require.Equal(t, int64(2000), db.head.MaxTime())
}) })
t.Run("existing-block-and-wal", func(t *testing.T) { t.Run("existing-block-and-wal", func(t *testing.T) {
dir, err := ioutil.TempDir("", "test_head_init") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
createBlock(t, dir, genSeries(1, 1, 1000, 6000)) createBlock(t, dir, genSeries(1, 1, 1000, 6000))
@ -2214,8 +2172,7 @@ func TestBlockRanges(t *testing.T) {
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
ctx := context.Background() ctx := context.Background()
dir, err := ioutil.TempDir("", "test_storage") dir := t.TempDir()
require.NoError(t, err)
// Test that the compactor doesn't create overlapping blocks // Test that the compactor doesn't create overlapping blocks
// when a non standard block already exists. // when a non standard block already exists.
@ -2225,9 +2182,7 @@ func TestBlockRanges(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1 rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
defer func() {
os.RemoveAll(dir)
}()
app := db.Appender(ctx) app := db.Appender(ctx)
lbl := labels.Labels{{Name: "a", Value: "b"}} lbl := labels.Labels{{Name: "a", Value: "b"}}
_, err = app.Append(0, lbl, firstBlockMaxT-1, rand.Float64()) _, err = app.Append(0, lbl, firstBlockMaxT-1, rand.Float64())
@ -2314,12 +2269,7 @@ func TestDBReadOnly(t *testing.T) {
// Bootstrap the db. // Bootstrap the db.
{ {
dbDir, err = ioutil.TempDir("", "test") dbDir = t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dbDir))
}()
dbBlocks := []*BlockMeta{ dbBlocks := []*BlockMeta{
// Create three 2-sample blocks. // Create three 2-sample blocks.
@ -2408,12 +2358,7 @@ func TestDBReadOnly(t *testing.T) {
// TestDBReadOnlyClosing ensures that after closing the db // TestDBReadOnlyClosing ensures that after closing the db
// all api methods return an ErrClosed. // all api methods return an ErrClosed.
func TestDBReadOnlyClosing(t *testing.T) { func TestDBReadOnlyClosing(t *testing.T) {
dbDir, err := ioutil.TempDir("", "test") dbDir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dbDir))
}()
db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))) db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)))
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, db.Close()) require.NoError(t, db.Close())
@ -2435,12 +2380,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
// Bootstrap the db. // Bootstrap the db.
{ {
dbDir, err = ioutil.TempDir("", "test") dbDir = t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dbDir))
}()
// Append data to the WAL. // Append data to the WAL.
db, err := Open(dbDir, logger, nil, nil, nil) db, err := Open(dbDir, logger, nil, nil, nil)
@ -2460,12 +2400,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
db, err := OpenDBReadOnly(dbDir, logger) db, err := OpenDBReadOnly(dbDir, logger)
require.NoError(t, err) require.NoError(t, err)
flush, err := ioutil.TempDir("", "flush") flush := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(flush))
}()
require.NoError(t, db.FlushWAL(flush)) require.NoError(t, db.FlushWAL(flush))
require.NoError(t, db.Close()) require.NoError(t, db.Close())
@ -2503,10 +2438,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
t.Skip("skipping test since tsdb isolation is disabled") t.Skip("skipping test since tsdb isolation is disabled")
} }
tmpdir, _ := ioutil.TempDir("", "test") tmpdir := t.TempDir()
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
db, err := Open(tmpdir, nil, nil, nil, nil) db, err := Open(tmpdir, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
@ -2577,10 +2509,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
t.Skip("skipping test since tsdb isolation is disabled") t.Skip("skipping test since tsdb isolation is disabled")
} }
tmpdir, _ := ioutil.TempDir("", "test") tmpdir := t.TempDir()
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
db, err := Open(tmpdir, nil, nil, nil, nil) db, err := Open(tmpdir, nil, nil, nil, nil)
require.NoError(t, err) require.NoError(t, err)
@ -2767,9 +2696,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
for i, test := range tests { for i, test := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
tempDir, err := ioutil.TempDir("", "test_chunk_writer") tempDir := t.TempDir()
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()
chunkw, err := chunks.NewWriterWithSegSize(tempDir, chunks.SegmentHeaderSize+int64(test.segmentSize)) chunkw, err := chunks.NewWriterWithSegSize(tempDir, chunks.SegmentHeaderSize+int64(test.segmentSize))
require.NoError(t, err) require.NoError(t, err)
@ -2856,9 +2783,7 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}}), tsdbutil.ChunkFromSamples([]tsdbutil.Sample{sample{1, 5}}),
} }
tempDir, err := ioutil.TempDir("", "test_chunk_writer") tempDir := t.TempDir()
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(tempDir)) }()
chunkw, err := chunks.NewWriter(tempDir) chunkw, err := chunks.NewWriter(tempDir)
require.NoError(t, err) require.NoError(t, err)
@ -2895,9 +2820,7 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
// * compacts the head; and // * compacts the head; and
// * queries the db to ensure the samples are present from the compacted head. // * queries the db to ensure the samples are present from the compacted head.
func TestCompactHead(t *testing.T) { func TestCompactHead(t *testing.T) {
dbDir, err := ioutil.TempDir("", "testFlush") dbDir := t.TempDir()
require.NoError(t, err)
defer func() { require.NoError(t, os.RemoveAll(dbDir)) }()
// Open a DB and append data to the WAL. // Open a DB and append data to the WAL.
tsdbCfg := &Options{ tsdbCfg := &Options{
@ -2978,11 +2901,7 @@ func deleteNonBlocks(dbDir string) error {
} }
func TestOpen_VariousBlockStates(t *testing.T) { func TestOpen_VariousBlockStates(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "test") tmpDir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpDir))
})
var ( var (
expectedLoadedDirs = map[string]struct{}{} expectedLoadedDirs = map[string]struct{}{}
@ -3097,11 +3016,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
MaxBlockDuration: blockRange, MaxBlockDuration: blockRange,
} }
tmpDir, err := ioutil.TempDir("", "test") tmpDir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpDir))
})
db, err := Open(tmpDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil) db, err := Open(tmpDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
require.NoError(t, err) require.NoError(t, err)
@ -3210,11 +3125,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
} }
func TestNoPanicOnTSDBOpenError(t *testing.T) { func TestNoPanicOnTSDBOpenError(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test") tmpdir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpdir))
})
// Taking the lock will cause a TSDB startup error. // Taking the lock will cause a TSDB startup error.
l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) l, err := tsdbutil.NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil)
@ -3499,11 +3410,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun
} }
func newTestDB(t *testing.T) *DB { func newTestDB(t *testing.T) *DB {
dir, err := ioutil.TempDir("", "test") dir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(dir))
})
db, err := Open(dir, nil, nil, DefaultOptions(), nil) db, err := Open(dir, nil, nil, DefaultOptions(), nil)
require.NoError(t, err) require.NoError(t, err)


@ -135,10 +135,18 @@ func (h *Head) appender() *headAppender {
} }
} }
// appendableMinValidTime returns the minimum valid timestamp for appends,
// such that samples stay ahead of prior blocks and the head compaction window.
func (h *Head) appendableMinValidTime() int64 { func (h *Head) appendableMinValidTime() int64 {
// Setting the minimum valid time to whichever is greater, the head min valid time or the compaction window, // This boundary ensures that no samples will be added to the compaction window.
// ensures that no samples will be added within the compaction window to avoid races. // This allows race-free, concurrent appending and compaction.
return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2) cwEnd := h.MaxTime() - h.chunkRange.Load()/2
// This boundary ensures that we avoid overlapping timeframes from one block to the next.
// While not necessary for correctness, it means we're not required to use vertical compaction.
minValid := h.minValidTime.Load()
return max(cwEnd, minValid)
} }
// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head. // AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
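A worked example of the rewritten bound, with hypothetical numbers: with a 2h chunk range and the newest head sample at 12:00, the compaction window ends at 12:00 minus 1h, i.e. 11:00; if the last persisted block ends at 10:00, appends are accepted from max(11:00, 10:00) = 11:00 onward. In sketch form (millisecond timestamps, illustrative values only):

	const hour = int64(3600 * 1000)
	maxTime := 12 * hour   // newest sample in the head
	chunkRange := 2 * hour // head chunk range
	minValid := 10 * hour  // end of the most recent persisted block

	cwEnd := maxTime - chunkRange/2 // 11h: keep appends out of the compaction window
	appendableMin := cwEnd          // max(cwEnd, minValid)
	if minValid > appendableMin {
		appendableMin = minValid
	}
	// appendableMin == 11*hour; older samples are rejected as out of bounds.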


@ -14,8 +14,6 @@
package tsdb package tsdb
import ( import (
"io/ioutil"
"os"
"strconv" "strconv"
"testing" "testing"
@ -27,11 +25,7 @@ import (
) )
func BenchmarkHeadStripeSeriesCreate(b *testing.B) { func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
// Put a series, select it. GC it and then access it. // Put a series, select it. GC it and then access it.
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
@ -46,11 +40,7 @@ func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
} }
func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) { func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
// Put a series, select it. GC it and then access it. // Put a series, select it. GC it and then access it.
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
@ -70,11 +60,7 @@ func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
} }
func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) { func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
// Put a series, select it. GC it and then access it. // Put a series, select it. GC it and then access it.
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000


@ -51,8 +51,7 @@ import (
) )
func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal.WAL) { func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal.WAL) {
dir, err := ioutil.TempDir("", "test") dir := t.TempDir()
require.NoError(t, err)
wlog, err := wal.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL) wlog, err := wal.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compressWAL)
require.NoError(t, err) require.NoError(t, err)
@ -67,9 +66,6 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal.
require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil })) require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil }))
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(dir))
})
return h, wlog return h, wlog
} }
@ -183,11 +179,7 @@ func BenchmarkLoadWAL(b *testing.B) {
// fmt.Println("exemplars per series: ", exemplarsPerSeries) // fmt.Println("exemplars per series: ", exemplarsPerSeries)
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT), b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT),
func(b *testing.B) { func(b *testing.B) {
dir, err := ioutil.TempDir("", "test_load_wal") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
w, err := wal.New(nil, nil, dir, false) w, err := wal.New(nil, nil, dir, false)
require.NoError(b, err) require.NoError(b, err)
@ -724,11 +716,7 @@ func TestHead_Truncate(t *testing.T) {
// Validate various behaviors brought on by firstChunkID accounting for // Validate various behaviors brought on by firstChunkID accounting for
// garbage collected chunks. // garbage collected chunks.
func TestMemSeries_truncateChunks(t *testing.T) { func TestMemSeries_truncateChunks(t *testing.T) {
dir, err := ioutil.TempDir("", "truncate_chunks") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
// This is usually taken from the Head, but passing manually here. // This is usually taken from the Head, but passing manually here.
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
require.NoError(t, err) require.NoError(t, err)
@ -1268,11 +1256,7 @@ func TestComputeChunkEndTime(t *testing.T) {
} }
func TestMemSeries_append(t *testing.T) { func TestMemSeries_append(t *testing.T) {
dir, err := ioutil.TempDir("", "append") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
// This is usually taken from the Head, but passing manually here. // This is usually taken from the Head, but passing manually here.
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
require.NoError(t, err) require.NoError(t, err)
@ -1556,11 +1540,7 @@ func TestWalRepair_DecodingError(t *testing.T) {
} { } {
for _, compress := range []bool{false, true} { for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) { t.Run(fmt.Sprintf("%s,compress=%t", name, compress), func(t *testing.T) {
dir, err := ioutil.TempDir("", "wal_repair") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
// Fill the wal and corrupt it. // Fill the wal and corrupt it.
{ {
@ -1620,11 +1600,7 @@ func TestWalRepair_DecodingError(t *testing.T) {
} }
func TestHeadReadWriterRepair(t *testing.T) { func TestHeadReadWriterRepair(t *testing.T) {
dir, err := ioutil.TempDir("", "head_read_writer_repair") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
const chunkRange = 1000 const chunkRange = 1000
@ -2030,11 +2006,7 @@ func TestIsolationWithoutAdd(t *testing.T) {
} }
func TestOutOfOrderSamplesMetric(t *testing.T) { func TestOutOfOrderSamplesMetric(t *testing.T) {
dir, err := ioutil.TempDir("", "test") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
db, err := Open(dir, nil, nil, DefaultOptions(), nil) db, err := Open(dir, nil, nil, DefaultOptions(), nil)
require.NoError(t, err) require.NoError(t, err)
@ -2456,11 +2428,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
} }
func TestMemSafeIteratorSeekIntoBuffer(t *testing.T) { func TestMemSafeIteratorSeekIntoBuffer(t *testing.T) {
dir, err := ioutil.TempDir("", "iterator_seek") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
// This is usually taken from the Head, but passing manually here. // This is usually taken from the Head, but passing manually here.
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize) chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
require.NoError(t, err) require.NoError(t, err)


@ -137,11 +137,7 @@ func (m mockIndex) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]ch
} }
func TestIndexRW_Create_Open(t *testing.T) { func TestIndexRW_Create_Open(t *testing.T) {
dir, err := ioutil.TempDir("", "test_index_create") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
@ -166,11 +162,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
} }
func TestIndexRW_Postings(t *testing.T) { func TestIndexRW_Postings(t *testing.T) {
dir, err := ioutil.TempDir("", "test_index_postings") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
@ -250,11 +242,7 @@ func TestIndexRW_Postings(t *testing.T) {
} }
func TestPostingsMany(t *testing.T) { func TestPostingsMany(t *testing.T) {
dir, err := ioutil.TempDir("", "test_postings_many") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
@ -344,11 +332,7 @@ func TestPostingsMany(t *testing.T) {
} }
func TestPersistence_index_e2e(t *testing.T) { func TestPersistence_index_e2e(t *testing.T) {
dir, err := ioutil.TempDir("", "test_persistence_e2e") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000) lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
require.NoError(t, err) require.NoError(t, err)


@ -16,8 +16,6 @@ package tsdb
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"os"
"strconv" "strconv"
"testing" "testing"
@ -32,11 +30,7 @@ const (
) )
func BenchmarkQuerier(b *testing.B) { func BenchmarkQuerier(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir opts.ChunkDirRoot = chunkDir
@ -74,11 +68,7 @@ func BenchmarkQuerier(b *testing.B) {
}) })
}) })
tmpdir, err := ioutil.TempDir("", "test_benchpostingsformatchers") tmpdir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(tmpdir))
}()
blockdir := createBlockFromHead(b, tmpdir, h) blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil) block, err := OpenBlock(nil, blockdir, nil)
@ -186,11 +176,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
} }
func BenchmarkQuerierSelect(b *testing.B) { func BenchmarkQuerierSelect(b *testing.B) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir opts.ChunkDirRoot = chunkDir
@ -230,11 +216,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
bench(b, h, true) bench(b, h, true)
}) })
tmpdir, err := ioutil.TempDir("", "test_benchquerierselect") tmpdir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(tmpdir))
}()
blockdir := createBlockFromHead(b, tmpdir, h) blockdir := createBlockFromHead(b, tmpdir, h)
block, err := OpenBlock(nil, blockdir, nil) block, err := OpenBlock(nil, blockdir, nil)


@ -16,10 +16,8 @@ package tsdb
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"math" "math"
"math/rand" "math/rand"
"os"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv" "strconv"
@ -1329,11 +1327,7 @@ func BenchmarkQueryIterator(b *testing.B) {
c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage) c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)
b.Run(benchMsg, func(b *testing.B) { b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
var ( var (
blocks []*Block blocks []*Block
@ -1396,11 +1390,7 @@ func BenchmarkQuerySeek(b *testing.B) {
c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage) c.numBlocks, c.numSeries, c.numSamplesPerSeriesPerBlock, overlapPercentage)
b.Run(benchMsg, func(b *testing.B) { b.Run(benchMsg, func(b *testing.B) {
dir, err := ioutil.TempDir("", "bench_query_iterator") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
var ( var (
blocks []*Block blocks []*Block
@ -1451,7 +1441,6 @@ func BenchmarkQuerySeek(b *testing.B) {
require.NoError(b, it.Err()) require.NoError(b, it.Err())
} }
require.NoError(b, ss.Err()) require.NoError(b, ss.Err())
require.NoError(b, err)
require.Equal(b, 0, len(ss.Warnings())) require.Equal(b, 0, len(ss.Warnings()))
}) })
} }
@ -1537,11 +1526,7 @@ func BenchmarkSetMatcher(b *testing.B) {
} }
for _, c := range cases { for _, c := range cases {
dir, err := ioutil.TempDir("", "bench_postings_for_matchers") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
var ( var (
blocks []*Block blocks []*Block
@ -1654,11 +1639,7 @@ func TestFindSetMatches(t *testing.T) {
} }
func TestPostingsForMatchers(t *testing.T) { func TestPostingsForMatchers(t *testing.T) {
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(chunkDir))
}()
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
opts.ChunkDirRoot = chunkDir opts.ChunkDirRoot = chunkDir
@ -1915,13 +1896,7 @@ func TestPostingsForMatchers(t *testing.T) {
// TestClose ensures that calling Close more than once doesn't block and doesn't panic. // TestClose ensures that calling Close more than once doesn't block and doesn't panic.
func TestClose(t *testing.T) { func TestClose(t *testing.T) {
dir, err := ioutil.TempDir("", "test_storage") dir := t.TempDir()
if err != nil {
t.Fatalf("Opening test dir failed: %s", err)
}
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
createBlock(t, dir, genSeries(1, 1, 0, 10)) createBlock(t, dir, genSeries(1, 1, 0, 10))
createBlock(t, dir, genSeries(1, 1, 10, 20)) createBlock(t, dir, genSeries(1, 1, 10, 20))
@ -1982,11 +1957,7 @@ func BenchmarkQueries(b *testing.B) {
for title, selectors := range cases { for title, selectors := range cases {
for _, nSeries := range []int{10} { for _, nSeries := range []int{10} {
for _, nSamples := range []int64{1000, 10000, 100000} { for _, nSamples := range []int64{1000, 10000, 100000} {
dir, err := ioutil.TempDir("", "test_persisted_query") dir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(dir))
}()
series := genSeries(nSeries, 5, 1, nSamples) series := genSeries(nSeries, 5, 1, nSamples)
@ -2024,11 +1995,7 @@ func BenchmarkQueries(b *testing.B) {
queryTypes["_3-Blocks"] = storage.NewMergeQuerier(qs[0:3], nil, storage.ChainedSeriesMerge) queryTypes["_3-Blocks"] = storage.NewMergeQuerier(qs[0:3], nil, storage.ChainedSeriesMerge)
queryTypes["_10-Blocks"] = storage.NewMergeQuerier(qs, nil, storage.ChainedSeriesMerge) queryTypes["_10-Blocks"] = storage.NewMergeQuerier(qs, nil, storage.ChainedSeriesMerge)
chunkDir, err := ioutil.TempDir("", "chunk_dir") chunkDir := b.TempDir()
require.NoError(b, err)
defer func() {
require.NoError(b, os.RemoveAll(chunkDir))
}()
head := createHead(b, nil, series, chunkDir) head := createHead(b, nil, series, chunkDir)
qHead, err := NewBlockQuerier(head, 1, nSamples) qHead, err := NewBlockQuerier(head, 1, nSamples)
require.NoError(b, err) require.NoError(b, err)


@ -14,7 +14,6 @@
package tsdb package tsdb
import ( import (
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -28,11 +27,7 @@ import (
) )
func TestRepairBadIndexVersion(t *testing.T) { func TestRepairBadIndexVersion(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "test") tmpDir := t.TempDir()
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, os.RemoveAll(tmpDir))
})
// The broken index used in this test was written by the following script // The broken index used in this test was written by the following script
// at a broken revision. // at a broken revision.
@ -74,7 +69,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
// Check the current db. // Check the current db.
// In its current state, lookups should fail with the fixed code. // In its current state, lookups should fail with the fixed code.
_, _, err = readMetaFile(tmpDbDir) _, _, err := readMetaFile(tmpDbDir)
require.Error(t, err) require.Error(t, err)
// Touch chunks dir in block to imitate them. // Touch chunks dir in block to imitate them.

View file

@ -14,10 +14,8 @@
package tombstones package tombstones
import ( import (
"io/ioutil"
"math" "math"
"math/rand" "math/rand"
"os"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -34,10 +32,7 @@ func TestMain(m *testing.M) {
} }
func TestWriteAndReadbackTombstones(t *testing.T) { func TestWriteAndReadbackTombstones(t *testing.T) {
tmpdir, _ := ioutil.TempDir("", "test") tmpdir := t.TempDir()
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
ref := uint64(0) ref := uint64(0)

View file

@ -45,9 +45,14 @@ const (
// from the WAL on to somewhere else. Functions will be called concurrently // from the WAL on to somewhere else. Functions will be called concurrently
// and it is left to the implementer to make sure they are safe. // and it is left to the implementer to make sure they are safe.
type WriteTo interface { type WriteTo interface {
// Append and AppendExemplar should block until the samples are fully accepted,
// whether enqueued in memory or successfully written to its final destination.
// Once returned, the WAL Watcher will not attempt to pass that data again.
Append([]record.RefSample) bool Append([]record.RefSample) bool
AppendExemplars([]record.RefExemplar) bool AppendExemplars([]record.RefExemplar) bool
StoreSeries([]record.RefSeries, int) StoreSeries([]record.RefSeries, int)
// Next two methods are intended for garbage-collection: first we call // Next two methods are intended for garbage-collection: first we call
// UpdateSeriesSegment on all current series // UpdateSeriesSegment on all current series
UpdateSeriesSegment([]record.RefSeries, int) UpdateSeriesSegment([]record.RefSeries, int)
@ -511,7 +516,6 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
} }
} }
if len(send) > 0 { if len(send) > 0 {
// Blocks until the sample is sent to all remote write endpoints or closed (because enqueue blocks).
w.writer.Append(send) w.writer.Append(send)
send = send[:0] send = send[:0]
} }
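The comment deleted from the call site above has moved onto the WriteTo interface, where it belongs: Append must not return until the receiver owns the data, because once it returns the Watcher drops its reference and will not replay those samples. Below is a minimal sketch of an implementation honoring that contract; the channel-backed queue is illustrative rather than the actual remote-write code, and only Append is shown:

package example

import "github.com/prometheus/prometheus/tsdb/record"

// queueWriteTo accepts batches on a channel. Append blocks until a
// consumer takes the batch or the queue shuts down, so returning true
// really means the data was accepted; the Watcher will not offer it again.
type queueWriteTo struct {
	ch   chan []record.RefSample
	done chan struct{}
}

func (q *queueWriteTo) Append(samples []record.RefSample) bool {
	select {
	case q.ch <- samples: // blocks until the batch is enqueued
		return true
	case <-q.done: // shutting down; the batch was not accepted
		return false
	}
}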

View file

@ -39,11 +39,7 @@ import (
) )
func TestSegmentWAL_cut(t *testing.T) { func TestSegmentWAL_cut(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test_wal_cut") tmpdir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(tmpdir))
}()
// This calls cut() implicitly the first time without a previous tail. // This calls cut() implicitly the first time without a previous tail.
w, err := OpenSegmentWAL(tmpdir, nil, 0, nil) w, err := OpenSegmentWAL(tmpdir, nil, 0, nil)
@ -89,11 +85,7 @@ func TestSegmentWAL_Truncate(t *testing.T) {
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics) series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
require.NoError(t, err) require.NoError(t, err)
dir, err := ioutil.TempDir("", "test_wal_log_truncate") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
w, err := OpenSegmentWAL(dir, nil, 0, nil) w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err) require.NoError(t, err)
@ -172,11 +164,7 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics) series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
require.NoError(t, err) require.NoError(t, err)
dir, err := ioutil.TempDir("", "test_wal_log_restore") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
var ( var (
recordedSeries [][]record.RefSeries recordedSeries [][]record.RefSeries
@ -281,11 +269,7 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
} }
func TestWALRestoreCorrupted_invalidSegment(t *testing.T) { func TestWALRestoreCorrupted_invalidSegment(t *testing.T) {
dir, err := ioutil.TempDir("", "test_wal_log_restore") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wal, err := OpenSegmentWAL(dir, nil, 0, nil) wal, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err) require.NoError(t, err)
@ -386,11 +370,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
// Generate testing data. It does not make semantic sense but // Generate testing data. It does not make semantic sense but
// for the purpose of this test. // for the purpose of this test.
dir, err := ioutil.TempDir("", "test_corrupted") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
w, err := OpenSegmentWAL(dir, nil, 0, nil) w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err) require.NoError(t, err)
@ -466,11 +446,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
func TestMigrateWAL_Empty(t *testing.T) { func TestMigrateWAL_Empty(t *testing.T) {
// The migration procedure must properly deal with a zero-length segment, // The migration procedure must properly deal with a zero-length segment,
// which is valid in the new format. // which is valid in the new format.
dir, err := ioutil.TempDir("", "walmigrate") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal") wdir := path.Join(dir, "wal")
@ -483,11 +459,7 @@ func TestMigrateWAL_Empty(t *testing.T) {
} }
func TestMigrateWAL_Fuzz(t *testing.T) { func TestMigrateWAL_Fuzz(t *testing.T) {
dir, err := ioutil.TempDir("", "walmigrate") dir := t.TempDir()
require.NoError(t, err)
defer func() {
require.NoError(t, os.RemoveAll(dir))
}()
wdir := path.Join(dir, "wal") wdir := path.Join(dir, "wal")

View file

@ -15,7 +15,8 @@ package httputil
import ( import (
"net/http" "net/http"
"regexp"
"github.com/grafana/regexp"
) )
var corsHeaders = map[string]string{ var corsHeaders = map[string]string{

View file

@ -15,9 +15,9 @@ package httputil
import ( import (
"net/http" "net/http"
"regexp"
"testing" "testing"
"github.com/grafana/regexp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View file

@ -16,10 +16,10 @@ package logging
import ( import (
"io/ioutil" "io/ioutil"
"os" "os"
"regexp"
"strings" "strings"
"testing" "testing"
"github.com/grafana/regexp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View file

@ -16,8 +16,9 @@ package stats
import ( import (
"context" "context"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
) )
// QueryTiming identifies the code area or functionality in which time is spent // QueryTiming identifies the code area or functionality in which time is spent
@ -120,11 +121,11 @@ type SpanTimer struct {
timer *Timer timer *Timer
observers []prometheus.Observer observers []prometheus.Observer
span opentracing.Span span trace.Span
} }
func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) { func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
span, ctx := opentracing.StartSpanFromContext(ctx, operation) ctx, span := otel.Tracer("").Start(ctx, operation)
timer.Start() timer.Start()
return &SpanTimer{ return &SpanTimer{
@ -137,7 +138,7 @@ func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers
func (s *SpanTimer) Finish() { func (s *SpanTimer) Finish() {
s.timer.Stop() s.timer.Stop()
s.span.Finish() s.span.End()
for _, obs := range s.observers { for _, obs := range s.observers {
obs.Observe(s.timer.ElapsedTime().Seconds()) obs.Observe(s.timer.ElapsedTime().Seconds())
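The tracing swap changes more than the import path: OpenTelemetry's Tracer.Start returns (ctx, span) where OpenTracing's StartSpanFromContext returned (span, ctx), and a span is closed with End rather than Finish. A minimal sketch of the new shape, independent of the SpanTimer type:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
)

func timedOperation(ctx context.Context) {
	// otel.Tracer("") returns a tracer from the globally registered
	// provider; Start yields the derived context first, then the span.
	ctx, span := otel.Tracer("").Start(ctx, "example_operation")
	defer span.End() // replaces opentracing's span.Finish()

	doWork(ctx) // the traced work runs with the span's context
}

func doWork(context.Context) {}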

View file

@ -15,10 +15,10 @@ package stats
import ( import (
"encoding/json" "encoding/json"
"regexp"
"testing" "testing"
"time" "time"
"github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"

View file

@ -16,7 +16,8 @@ package strutil
import ( import (
"fmt" "fmt"
"net/url" "net/url"
"regexp"
"github.com/grafana/regexp"
) )
var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
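github.com/grafana/regexp is an API-compatible, performance-focused fork of the standard library package, so the swap seen throughout these files is import-only and call sites compile unchanged. A sketch reusing the variable above, with an illustrative helper:

package example

import "github.com/grafana/regexp" // drop-in replacement for the stdlib "regexp"

var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// sanitizeLabel shows that the fork exposes the same MustCompile and
// ReplaceAllString API as the standard library.
func sanitizeLabel(s string) string {
	return invalidLabelCharRE.ReplaceAllString(s, "_")
}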

View file

@ -23,7 +23,6 @@ import (
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -32,6 +31,7 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/go-kit/log/level" "github.com/go-kit/log/level"
"github.com/grafana/regexp"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"

View file

@ -1249,10 +1249,11 @@
} }
}, },
"node_modules/@codemirror/history": { "node_modules/@codemirror/history": {
"version": "0.19.0", "version": "0.19.2",
"license": "MIT", "resolved": "https://registry.npmjs.org/@codemirror/history/-/history-0.19.2.tgz",
"integrity": "sha512-unhP4t3N2smzmHoo/Yio6ueWi+il8gm9VKrvi6wlcdGH5fOfVDNkmjHQ495SiR+EdOG35+3iNebSPYww0vN7ow==",
"dependencies": { "dependencies": {
"@codemirror/state": "^0.19.0", "@codemirror/state": "^0.19.2",
"@codemirror/view": "^0.19.0" "@codemirror/view": "^0.19.0"
} }
}, },
@ -1598,9 +1599,9 @@
"integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ==" "integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ=="
}, },
"node_modules/@nexucis/kvsearch": { "node_modules/@nexucis/kvsearch": {
"version": "0.3.0", "version": "0.4.0",
"resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.3.0.tgz", "resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.4.0.tgz",
"integrity": "sha512-tHIH6W/mRUZZ0ZQyRbgp2uhat+2O1c1jX1EC6NHv7/8OIeHx1HBZ5ZZb0KSUVWl4jkNzYw6AO39OoTELtrjaQw==", "integrity": "sha512-5kWxzjLhCucArZshf0bCcmU61aGFgrm98iG6/LEeKejOuoTq1M7sumcjGQ5FR0xMKQWmwC9mr7OvWgAmolxWSg==",
"dependencies": { "dependencies": {
"@nexucis/fuzzy": "^0.3.0" "@nexucis/fuzzy": "^0.3.0"
} }
@ -2189,18 +2190,16 @@
"license": "ISC" "license": "ISC"
}, },
"node_modules/@wojtekmaj/enzyme-adapter-react-17": { "node_modules/@wojtekmaj/enzyme-adapter-react-17": {
"version": "0.6.5", "version": "0.6.6",
"resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-react-17/-/enzyme-adapter-react-17-0.6.5.tgz", "resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-react-17/-/enzyme-adapter-react-17-0.6.6.tgz",
"integrity": "sha512-ChIObUiXXYUiqzXPqOai+p6KF5dlbItpDDYsftUOQiAiygbMDlLeJIjynC6ZrJIa2U2MpRp4YJmtR2GQyIHjgA==", "integrity": "sha512-gSfhg8CiL0Vwc2UgUblGVZIy7M0KyXaZsd8+QwzV8TSVRLkGyzdLtYEcs9wRWyQTsdmOd+oRGqbVgUX7AVJxug==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@wojtekmaj/enzyme-adapter-utils": "^0.1.1", "@wojtekmaj/enzyme-adapter-utils": "^0.1.2",
"enzyme-shallow-equal": "^1.0.0", "enzyme-shallow-equal": "^1.0.0",
"has": "^1.0.0", "has": "^1.0.0",
"object.assign": "^4.1.0",
"object.values": "^1.1.0",
"prop-types": "^15.7.0", "prop-types": "^15.7.0",
"react-is": "^17.0.2", "react-is": "^17.0.0",
"react-test-renderer": "^17.0.0" "react-test-renderer": "^17.0.0"
}, },
"peerDependencies": { "peerDependencies": {
@ -2210,14 +2209,13 @@
} }
}, },
"node_modules/@wojtekmaj/enzyme-adapter-utils": { "node_modules/@wojtekmaj/enzyme-adapter-utils": {
"version": "0.1.1", "version": "0.1.2",
"resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-utils/-/enzyme-adapter-utils-0.1.1.tgz", "resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-utils/-/enzyme-adapter-utils-0.1.2.tgz",
"integrity": "sha512-bNPWtN/d8huKOkC6j1E3EkSamnRrHHT7YuR6f9JppAQqtoAm3v4/vERe4J14jQKmHLCyEBHXrlgb7H6l817hVg==", "integrity": "sha512-MM/DqDqvxNVlWLqSVQiUbRN9MuDLJfefmPbJ8ZKdmdf5ID8G+i42XhFpoQh5bAZUCdwzRae3+WSZl2lXcFOrhw==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"function.prototype.name": "^1.1.0", "function.prototype.name": "^1.1.0",
"has": "^1.0.0", "has": "^1.0.0",
"object.assign": "^4.1.0",
"object.fromentries": "^2.0.0", "object.fromentries": "^2.0.0",
"prop-types": "^15.7.0" "prop-types": "^15.7.0"
}, },
@ -7260,7 +7258,7 @@
"@codemirror/commands": "^0.19.5", "@codemirror/commands": "^0.19.5",
"@codemirror/comment": "^0.19.0", "@codemirror/comment": "^0.19.0",
"@codemirror/highlight": "^0.19.6", "@codemirror/highlight": "^0.19.6",
"@codemirror/history": "^0.19.0", "@codemirror/history": "^0.19.2",
"@codemirror/language": "^0.19.7", "@codemirror/language": "^0.19.7",
"@codemirror/lint": "^0.19.3", "@codemirror/lint": "^0.19.3",
"@codemirror/matchbrackets": "^0.19.3", "@codemirror/matchbrackets": "^0.19.3",
@ -7272,7 +7270,7 @@
"@fortawesome/free-solid-svg-icons": "^5.7.2", "@fortawesome/free-solid-svg-icons": "^5.7.2",
"@fortawesome/react-fontawesome": "^0.1.16", "@fortawesome/react-fontawesome": "^0.1.16",
"@nexucis/fuzzy": "^0.3.0", "@nexucis/fuzzy": "^0.3.0",
"@nexucis/kvsearch": "^0.3.0", "@nexucis/kvsearch": "^0.4.0",
"bootstrap": "^4.6.1", "bootstrap": "^4.6.1",
"codemirror-promql": "0.19.0", "codemirror-promql": "0.19.0",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
@ -7309,7 +7307,7 @@
"@types/react-router-dom": "^5.3.2", "@types/react-router-dom": "^5.3.2",
"@types/sanitize-html": "^2.6.1", "@types/sanitize-html": "^2.6.1",
"@types/sinon": "^10.0.6", "@types/sinon": "^10.0.6",
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.6",
"enzyme": "^3.11.0", "enzyme": "^3.11.0",
"enzyme-to-json": "^3.6.2", "enzyme-to-json": "^3.6.2",
"eslint-config-prettier": "^8.3.0", "eslint-config-prettier": "^8.3.0",
@ -27422,9 +27420,11 @@
} }
}, },
"@codemirror/history": { "@codemirror/history": {
"version": "0.19.0", "version": "0.19.2",
"resolved": "https://registry.npmjs.org/@codemirror/history/-/history-0.19.2.tgz",
"integrity": "sha512-unhP4t3N2smzmHoo/Yio6ueWi+il8gm9VKrvi6wlcdGH5fOfVDNkmjHQ495SiR+EdOG35+3iNebSPYww0vN7ow==",
"requires": { "requires": {
"@codemirror/state": "^0.19.0", "@codemirror/state": "^0.19.2",
"@codemirror/view": "^0.19.0" "@codemirror/view": "^0.19.0"
} }
}, },
@ -27696,9 +27696,9 @@
"integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ==" "integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ=="
}, },
"@nexucis/kvsearch": { "@nexucis/kvsearch": {
"version": "0.3.0", "version": "0.4.0",
"resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.3.0.tgz", "resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.4.0.tgz",
"integrity": "sha512-tHIH6W/mRUZZ0ZQyRbgp2uhat+2O1c1jX1EC6NHv7/8OIeHx1HBZ5ZZb0KSUVWl4jkNzYw6AO39OoTELtrjaQw==", "integrity": "sha512-5kWxzjLhCucArZshf0bCcmU61aGFgrm98iG6/LEeKejOuoTq1M7sumcjGQ5FR0xMKQWmwC9mr7OvWgAmolxWSg==",
"requires": { "requires": {
"@nexucis/fuzzy": "^0.3.0" "@nexucis/fuzzy": "^0.3.0"
} }
@ -28107,30 +28107,27 @@
"dev": true "dev": true
}, },
"@wojtekmaj/enzyme-adapter-react-17": { "@wojtekmaj/enzyme-adapter-react-17": {
"version": "0.6.5", "version": "0.6.6",
"resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-react-17/-/enzyme-adapter-react-17-0.6.5.tgz", "resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-react-17/-/enzyme-adapter-react-17-0.6.6.tgz",
"integrity": "sha512-ChIObUiXXYUiqzXPqOai+p6KF5dlbItpDDYsftUOQiAiygbMDlLeJIjynC6ZrJIa2U2MpRp4YJmtR2GQyIHjgA==", "integrity": "sha512-gSfhg8CiL0Vwc2UgUblGVZIy7M0KyXaZsd8+QwzV8TSVRLkGyzdLtYEcs9wRWyQTsdmOd+oRGqbVgUX7AVJxug==",
"dev": true, "dev": true,
"requires": { "requires": {
"@wojtekmaj/enzyme-adapter-utils": "^0.1.1", "@wojtekmaj/enzyme-adapter-utils": "^0.1.2",
"enzyme-shallow-equal": "^1.0.0", "enzyme-shallow-equal": "^1.0.0",
"has": "^1.0.0", "has": "^1.0.0",
"object.assign": "^4.1.0",
"object.values": "^1.1.0",
"prop-types": "^15.7.0", "prop-types": "^15.7.0",
"react-is": "^17.0.2", "react-is": "^17.0.0",
"react-test-renderer": "^17.0.0" "react-test-renderer": "^17.0.0"
} }
}, },
"@wojtekmaj/enzyme-adapter-utils": { "@wojtekmaj/enzyme-adapter-utils": {
"version": "0.1.1", "version": "0.1.2",
"resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-utils/-/enzyme-adapter-utils-0.1.1.tgz", "resolved": "https://registry.npmjs.org/@wojtekmaj/enzyme-adapter-utils/-/enzyme-adapter-utils-0.1.2.tgz",
"integrity": "sha512-bNPWtN/d8huKOkC6j1E3EkSamnRrHHT7YuR6f9JppAQqtoAm3v4/vERe4J14jQKmHLCyEBHXrlgb7H6l817hVg==", "integrity": "sha512-MM/DqDqvxNVlWLqSVQiUbRN9MuDLJfefmPbJ8ZKdmdf5ID8G+i42XhFpoQh5bAZUCdwzRae3+WSZl2lXcFOrhw==",
"dev": true, "dev": true,
"requires": { "requires": {
"function.prototype.name": "^1.1.0", "function.prototype.name": "^1.1.0",
"has": "^1.0.0", "has": "^1.0.0",
"object.assign": "^4.1.0",
"object.fromentries": "^2.0.0", "object.fromentries": "^2.0.0",
"prop-types": "^15.7.0" "prop-types": "^15.7.0"
} }
@ -29713,7 +29710,7 @@
"@codemirror/commands": "^0.19.5", "@codemirror/commands": "^0.19.5",
"@codemirror/comment": "^0.19.0", "@codemirror/comment": "^0.19.0",
"@codemirror/highlight": "^0.19.6", "@codemirror/highlight": "^0.19.6",
"@codemirror/history": "^0.19.0", "@codemirror/history": "^0.19.2",
"@codemirror/language": "^0.19.7", "@codemirror/language": "^0.19.7",
"@codemirror/lint": "^0.19.3", "@codemirror/lint": "^0.19.3",
"@codemirror/matchbrackets": "^0.19.3", "@codemirror/matchbrackets": "^0.19.3",
@ -29725,7 +29722,7 @@
"@fortawesome/free-solid-svg-icons": "^5.7.2", "@fortawesome/free-solid-svg-icons": "^5.7.2",
"@fortawesome/react-fontawesome": "^0.1.16", "@fortawesome/react-fontawesome": "^0.1.16",
"@nexucis/fuzzy": "^0.3.0", "@nexucis/fuzzy": "^0.3.0",
"@nexucis/kvsearch": "^0.3.0", "@nexucis/kvsearch": "^0.4.0",
"@testing-library/react-hooks": "^7.0.1", "@testing-library/react-hooks": "^7.0.1",
"@types/enzyme": "^3.10.10", "@types/enzyme": "^3.10.10",
"@types/flot": "0.0.32", "@types/flot": "0.0.32",
@ -29739,7 +29736,7 @@
"@types/react-router-dom": "^5.3.2", "@types/react-router-dom": "^5.3.2",
"@types/sanitize-html": "^2.6.1", "@types/sanitize-html": "^2.6.1",
"@types/sinon": "^10.0.6", "@types/sinon": "^10.0.6",
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.6",
"bootstrap": "^4.6.1", "bootstrap": "^4.6.1",
"codemirror-promql": "0.19.0", "codemirror-promql": "0.19.0",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",

View file

@ -8,7 +8,7 @@
"@codemirror/commands": "^0.19.5", "@codemirror/commands": "^0.19.5",
"@codemirror/comment": "^0.19.0", "@codemirror/comment": "^0.19.0",
"@codemirror/highlight": "^0.19.6", "@codemirror/highlight": "^0.19.6",
"@codemirror/history": "^0.19.0", "@codemirror/history": "^0.19.2",
"@codemirror/language": "^0.19.7", "@codemirror/language": "^0.19.7",
"@codemirror/lint": "^0.19.3", "@codemirror/lint": "^0.19.3",
"@codemirror/matchbrackets": "^0.19.3", "@codemirror/matchbrackets": "^0.19.3",
@ -20,7 +20,7 @@
"@fortawesome/free-solid-svg-icons": "^5.7.2", "@fortawesome/free-solid-svg-icons": "^5.7.2",
"@fortawesome/react-fontawesome": "^0.1.16", "@fortawesome/react-fontawesome": "^0.1.16",
"@nexucis/fuzzy": "^0.3.0", "@nexucis/fuzzy": "^0.3.0",
"@nexucis/kvsearch": "^0.3.0", "@nexucis/kvsearch": "^0.4.0",
"bootstrap": "^4.6.1", "bootstrap": "^4.6.1",
"codemirror-promql": "0.19.0", "codemirror-promql": "0.19.0",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
@ -51,7 +51,8 @@
"test:debug": "react-scripts --inspect-brk test --runInBand --no-cache", "test:debug": "react-scripts --inspect-brk test --runInBand --no-cache",
"eject": "react-scripts eject", "eject": "react-scripts eject",
"lint:ci": "eslint --quiet \"src/**/*.{ts,tsx}\"", "lint:ci": "eslint --quiet \"src/**/*.{ts,tsx}\"",
"lint": "eslint --fix \"src/**/*.{ts,tsx}\"" "lint": "eslint --fix \"src/**/*.{ts,tsx}\"",
"snapshot": "react-scripts test --updateSnapshot"
}, },
"prettier": { "prettier": {
"singleQuote": true, "singleQuote": true,
@ -78,7 +79,7 @@
"@types/react-router-dom": "^5.3.2", "@types/react-router-dom": "^5.3.2",
"@types/sanitize-html": "^2.6.1", "@types/sanitize-html": "^2.6.1",
"@types/sinon": "^10.0.6", "@types/sinon": "^10.0.6",
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5", "@wojtekmaj/enzyme-adapter-react-17": "^0.6.6",
"enzyme": "^3.11.0", "enzyme": "^3.11.0",
"enzyme-to-json": "^3.6.2", "enzyme-to-json": "^3.6.2",
"eslint-config-prettier": "^8.3.0", "eslint-config-prettier": "^8.3.0",

View file

@ -12,8 +12,8 @@ import {
DropdownMenu, DropdownMenu,
DropdownItem, DropdownItem,
} from 'reactstrap'; } from 'reactstrap';
import { usePathPrefix } from './contexts/PathPrefixContext';
import { ThemeToggle } from './Theme'; import { ThemeToggle } from './Theme';
import logo from './images/prometheus_logo_grey.svg';
interface NavbarProps { interface NavbarProps {
consolesLink: string | null; consolesLink: string | null;
@ -23,11 +23,11 @@ interface NavbarProps {
const Navigation: FC<NavbarProps> = ({ consolesLink, agentMode }) => { const Navigation: FC<NavbarProps> = ({ consolesLink, agentMode }) => {
const [isOpen, setIsOpen] = useState(false); const [isOpen, setIsOpen] = useState(false);
const toggle = () => setIsOpen(!isOpen); const toggle = () => setIsOpen(!isOpen);
const pathPrefix = usePathPrefix();
return ( return (
<Navbar className="mb-3" dark color="dark" expand="md" fixed="top"> <Navbar className="mb-3" dark color="dark" expand="md" fixed="top">
<NavbarToggler onClick={toggle} className="mr-2" /> <NavbarToggler onClick={toggle} className="mr-2" />
<Link className="pt-0 navbar-brand" to={agentMode ? '/agent' : '/graph'}> <Link className="pt-0 navbar-brand" to={agentMode ? '/agent' : '/graph'}>
<img src={logo} className="d-inline-block align-top" alt="Prometheus logo" title="Prometheus" />
Prometheus{agentMode && ' Agent'} Prometheus{agentMode && ' Agent'}
</Link> </Link>
<Collapse isOpen={isOpen} navbar style={{ justifyContent: 'space-between' }}> <Collapse isOpen={isOpen} navbar style={{ justifyContent: 'space-between' }}>
@ -86,11 +86,6 @@ const Navigation: FC<NavbarProps> = ({ consolesLink, agentMode }) => {
<NavItem> <NavItem>
<NavLink href="https://prometheus.io/docs/prometheus/latest/getting_started/">Help</NavLink> <NavLink href="https://prometheus.io/docs/prometheus/latest/getting_started/">Help</NavLink>
</NavItem> </NavItem>
{!agentMode && (
<NavItem>
<NavLink href={`${pathPrefix}/classic/graph${window.location.search}`}>Classic UI</NavLink>
</NavItem>
)}
</Nav> </Nav>
</Collapse> </Collapse>
<ThemeToggle /> <ThemeToggle />

View file

@ -0,0 +1,31 @@
import React, { ChangeEvent, FC } from 'react';
import { Input, InputGroup, InputGroupAddon, InputGroupText } from 'reactstrap';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faSearch } from '@fortawesome/free-solid-svg-icons';
export interface SearchBarProps {
handleChange: (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => void;
placeholder: string;
}
const SearchBar: FC<SearchBarProps> = ({ handleChange, placeholder }) => {
let filterTimeout: NodeJS.Timeout;
const handleSearchChange = (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => {
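// Debounce: reset the pending timer on every keystroke and only
// propagate the change after 300 ms without further input.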
clearTimeout(filterTimeout);
filterTimeout = setTimeout(() => {
handleChange(e);
}, 300);
};
return (
<InputGroup>
<InputGroupAddon addonType="prepend">
<InputGroupText>{<FontAwesomeIcon icon={faSearch} />}</InputGroupText>
</InputGroupAddon>
<Input autoFocus onChange={handleSearchChange} placeholder={placeholder} />
</InputGroup>
);
};
export default SearchBar;

View file

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="115.333px" height="114px" viewBox="0 0 115.333 114" enable-background="new 0 0 115.333 114" xml:space="preserve">
<g id="Layer_2">
</g>
<g>
<path fill="#EEEEEE" d="M56.667,0.667C25.372,0.667,0,26.036,0,57.332c0,31.295,25.372,56.666,56.667,56.666
s56.666-25.371,56.666-56.666C113.333,26.036,87.961,0.667,56.667,0.667z M56.667,106.722c-8.904,0-16.123-5.948-16.123-13.283
H72.79C72.79,100.773,65.571,106.722,56.667,106.722z M83.297,89.04H30.034v-9.658h53.264V89.04z M83.106,74.411h-52.92
c-0.176-0.203-0.356-0.403-0.526-0.609c-5.452-6.62-6.736-10.076-7.983-13.598c-0.021-0.116,6.611,1.355,11.314,2.413
c0,0,2.42,0.56,5.958,1.205c-3.397-3.982-5.414-9.044-5.414-14.218c0-11.359,8.712-21.285,5.569-29.308
c3.059,0.249,6.331,6.456,6.552,16.161c3.252-4.494,4.613-12.701,4.613-17.733c0-5.21,3.433-11.262,6.867-11.469
c-3.061,5.045,0.793,9.37,4.219,20.099c1.285,4.03,1.121,10.812,2.113,15.113C63.797,33.534,65.333,20.5,71,16
c-2.5,5.667,0.37,12.758,2.333,16.167c3.167,5.5,5.087,9.667,5.087,17.548c0,5.284-1.951,10.259-5.242,14.148
c3.742-0.702,6.326-1.335,6.326-1.335l12.152-2.371C91.657,60.156,89.891,67.418,83.106,74.411z"/>
</g>
</svg>


View file

@ -1,10 +1,13 @@
import React, { FC, Fragment } from 'react'; import React, { ChangeEvent, FC, Fragment, useEffect, useState } from 'react';
import { Badge } from 'reactstrap'; import { Badge, Col, Row } from 'reactstrap';
import CollapsibleAlertPanel from './CollapsibleAlertPanel'; import CollapsibleAlertPanel from './CollapsibleAlertPanel';
import Checkbox from '../../components/Checkbox'; import Checkbox from '../../components/Checkbox';
import { isPresent } from '../../utils'; import { isPresent } from '../../utils';
import { Rule } from '../../types/types'; import { Rule } from '../../types/types';
import { useLocalStorage } from '../../hooks/useLocalStorage'; import { useLocalStorage } from '../../hooks/useLocalStorage';
import CustomInfiniteScroll, { InfiniteScrollItemsProps } from '../../components/CustomInfiniteScroll';
import { KVSearch } from '@nexucis/kvsearch';
import SearchBar from '../../components/SearchBar';
// eslint-disable-next-line @typescript-eslint/no-explicit-any // eslint-disable-next-line @typescript-eslint/no-explicit-any
export type RuleState = keyof RuleStatus<any>; export type RuleState = keyof RuleStatus<any>;
@ -35,13 +38,33 @@ interface RuleGroup {
interval: number; interval: number;
} }
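// The ['labels', /.*/] entry indexes every key nested under the labels
// object, so rules can be matched by label values as well as by name.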
const kvSearchRule = new KVSearch({
shouldSort: true,
indexedKeys: ['name', 'labels', ['labels', /.*/]],
});
const stateColorTuples: Array<[RuleState, 'success' | 'warning' | 'danger']> = [ const stateColorTuples: Array<[RuleState, 'success' | 'warning' | 'danger']> = [
['inactive', 'success'], ['inactive', 'success'],
['pending', 'warning'], ['pending', 'warning'],
['firing', 'danger'], ['firing', 'danger'],
]; ];
function GroupContent(showAnnotations: boolean) {
const Content: FC<InfiniteScrollItemsProps<Rule>> = ({ items }) => {
return (
<>
{items.map((rule, j) => (
<CollapsibleAlertPanel key={rule.name + j} showAnnotations={showAnnotations} rule={rule} />
))}
</>
);
};
return Content;
}
const AlertsContent: FC<AlertsProps> = ({ groups = [], statsCount }) => { const AlertsContent: FC<AlertsProps> = ({ groups = [], statsCount }) => {
const [groupList, setGroupList] = useState(groups);
const [filteredList, setFilteredList] = useState(groups);
const [filter, setFilter] = useLocalStorage('alerts-status-filter', { const [filter, setFilter] = useLocalStorage('alerts-status-filter', {
firing: true, firing: true,
pending: true, pending: true,
@ -56,50 +79,80 @@ const AlertsContent: FC<AlertsProps> = ({ groups = [], statsCount }) => {
}); });
}; };
const handleSearchChange = (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => {
if (e.target.value !== '') {
const pattern = e.target.value.trim();
const result: RuleGroup[] = [];
for (const group of groups) {
const ruleFilterList = kvSearchRule.filter(pattern, group.rules);
if (ruleFilterList.length > 0) {
result.push({
file: group.file,
name: group.name,
interval: group.interval,
rules: ruleFilterList.map((value) => value.original as unknown as Rule),
});
}
}
setGroupList(result);
} else {
setGroupList(groups);
}
};
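// Layer the state checkboxes (inactive/pending/firing) on top of the
// search-filtered groups whenever either input changes.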
useEffect(() => {
const result: RuleGroup[] = [];
for (const group of groupList) {
const newGroup = {
file: group.file,
name: group.name,
interval: group.interval,
rules: group.rules.filter((value) => filter[value.state]),
};
if (newGroup.rules.length > 0) {
result.push(newGroup);
}
}
setFilteredList(result);
}, [groupList, filter]);
return ( return (
<> <>
<div className="d-flex togglers-wrapper"> <Row className="align-items-center">
{stateColorTuples.map(([state, color]) => { <Col className="d-flex" lg="4" md="5">
return ( {stateColorTuples.map(([state, color]) => {
<Checkbox return (
key={state} <Checkbox key={state} checked={filter[state]} id={`${state}-toggler`} onChange={toggleFilter(state)}>
wrapperStyles={{ marginRight: 20 }} <Badge color={color} className="text-capitalize">
checked={filter[state]} {state} ({statsCount[state]})
id={`${state}-toggler`} </Badge>
onChange={toggleFilter(state)} </Checkbox>
> );
<Badge color={color} className="text-capitalize"> })}
{state} ({statsCount[state]}) </Col>
</Badge> <Col lg="5" md="4">
</Checkbox> <SearchBar handleChange={handleSearchChange} placeholder="Filter by name or labels" />
); </Col>
})} <Col className="d-flex flex-row-reverse" md="3">
<Checkbox <Checkbox
wrapperStyles={{ marginLeft: 'auto' }} checked={showAnnotations.checked}
checked={showAnnotations.checked} id="show-annotations-toggler"
id="show-annotations-toggler" onChange={({ target }) => setShowAnnotations({ checked: target.checked })}
onChange={({ target }) => setShowAnnotations({ checked: target.checked })} >
> <span style={{ fontSize: '0.9rem', lineHeight: 1.9, display: 'inline-block', whiteSpace: 'nowrap' }}>
<span style={{ fontSize: '0.9rem', lineHeight: 1.9 }}>Show annotations</span> Show annotations
</Checkbox> </span>
</div> </Checkbox>
{groups.map((group, i) => { </Col>
const hasFilterOn = group.rules.some((rule) => filter[rule.state]); </Row>
return hasFilterOn ? ( {filteredList.map((group, i) => (
<Fragment key={i}> <Fragment key={i}>
<GroupInfo rules={group.rules}> <GroupInfo rules={group.rules}>
{group.file} &gt; {group.name} {group.file} &gt; {group.name}
</GroupInfo> </GroupInfo>
{group.rules.map((rule, j) => { <CustomInfiniteScroll allItems={group.rules} child={GroupContent(showAnnotations.checked)} />
return ( </Fragment>
filter[rule.state] && ( ))}
<CollapsibleAlertPanel key={rule.name + j} showAnnotations={showAnnotations.checked} rule={rule} />
)
);
})}
</Fragment>
) : null;
})}
</> </>
); );
}; };

View file

@ -2,99 +2,141 @@
exports[`AlertsContent matches a snapshot 1`] = ` exports[`AlertsContent matches a snapshot 1`] = `
<Fragment> <Fragment>
<div <Row
className="d-flex togglers-wrapper" className="align-items-center"
tag="div"
widths={
Array [
"xs",
"sm",
"md",
"lg",
"xl",
]
}
> >
<Memo(Checkbox) <Col
checked={true} className="d-flex"
id="inactive-toggler" lg="4"
key="inactive" md="5"
onChange={[Function]} tag="div"
wrapperStyles={ widths={
Object { Array [
"marginRight": 20, "xs",
} "sm",
"md",
"lg",
"xl",
]
} }
> >
<Badge <Memo(Checkbox)
className="text-capitalize" checked={true}
color="success" id="inactive-toggler"
pill={false} key="inactive"
tag="span" onChange={[Function]}
> >
inactive <Badge
( className="text-capitalize"
0 color="success"
) pill={false}
</Badge> tag="span"
</Memo(Checkbox)> >
<Memo(Checkbox) inactive
checked={true} (
id="pending-toggler" 0
key="pending" )
onChange={[Function]} </Badge>
wrapperStyles={ </Memo(Checkbox)>
Object { <Memo(Checkbox)
"marginRight": 20, checked={true}
} id="pending-toggler"
} key="pending"
> onChange={[Function]}
<Badge
className="text-capitalize"
color="warning"
pill={false}
tag="span"
> >
pending <Badge
( className="text-capitalize"
0 color="warning"
) pill={false}
</Badge> tag="span"
</Memo(Checkbox)> >
<Memo(Checkbox) pending
checked={true} (
id="firing-toggler" 0
key="firing" )
onChange={[Function]} </Badge>
wrapperStyles={ </Memo(Checkbox)>
Object { <Memo(Checkbox)
"marginRight": 20, checked={true}
} id="firing-toggler"
} key="firing"
> onChange={[Function]}
<Badge
className="text-capitalize"
color="danger"
pill={false}
tag="span"
> >
firing <Badge
( className="text-capitalize"
0 color="danger"
) pill={false}
</Badge> tag="span"
</Memo(Checkbox)> >
<Memo(Checkbox) firing
checked={false} (
id="show-annotations-toggler" 0
onChange={[Function]} )
wrapperStyles={ </Badge>
Object { </Memo(Checkbox)>
"marginLeft": "auto", </Col>
} <Col
lg="5"
md="4"
tag="div"
widths={
Array [
"xs",
"sm",
"md",
"lg",
"xl",
]
} }
> >
<span <SearchBar
style={ handleChange={[Function]}
Object { placeholder="Filter by name or labels"
"fontSize": "0.9rem", />
"lineHeight": 1.9, </Col>
<Col
className="d-flex flex-row-reverse"
md="3"
tag="div"
widths={
Array [
"xs",
"sm",
"md",
"lg",
"xl",
]
}
>
<Memo(Checkbox)
checked={false}
id="show-annotations-toggler"
onChange={[Function]}
>
<span
style={
Object {
"display": "inline-block",
"fontSize": "0.9rem",
"lineHeight": 1.9,
"whiteSpace": "nowrap",
}
} }
} >
> Show annotations
Show annotations </span>
</span> </Memo(Checkbox)>
</Memo(Checkbox)> </Col>
</div> </Row>
</Fragment> </Fragment>
`; `;

View file

@ -0,0 +1,935 @@
export const colorPool = [
'#008000',
'#008080',
'#800000',
'#800080',
'#808000',
'#808080',
'#0000c0',
'#008040',
'#0080c0',
'#800040',
'#8000c0',
'#808040',
'#8080c0',
'#00c000',
'#00c080',
'#804000',
'#804080',
'#80c000',
'#80c080',
'#0040c0',
'#00c040',
'#00c0c0',
'#804040',
'#8040c0',
'#80c040',
'#80c0c0',
'#408000',
'#408080',
'#c00000',
'#c00080',
'#c08000',
'#c08080',
'#4000c0',
'#408040',
'#4080c0',
'#c00040',
'#c000c0',
'#c08040',
'#c080c0',
'#404000',
'#404080',
'#40c000',
'#40c080',
'#c04000',
'#c04080',
'#c0c000',
'#c0c080',
'#404040',
'#4040c0',
'#40c040',
'#40c0c0',
'#c04040',
'#c040c0',
'#c0c040',
'#0000a0',
'#008020',
'#0080a0',
'#800020',
'#8000a0',
'#808020',
'#8080a0',
'#0000e0',
'#008060',
'#0080e0',
'#800060',
'#8000e0',
'#808060',
'#8080e0',
'#0040a0',
'#00c020',
'#00c0a0',
'#804020',
'#8040a0',
'#80c020',
'#80c0a0',
'#0040e0',
'#00c060',
'#00c0e0',
'#804060',
'#8040e0',
'#80c060',
'#80c0e0',
'#4000a0',
'#408020',
'#4080a0',
'#c00020',
'#c000a0',
'#c08020',
'#c080a0',
'#4000e0',
'#408060',
'#4080e0',
'#c00060',
'#c000e0',
'#c08060',
'#c080e0',
'#404020',
'#4040a0',
'#40c020',
'#40c0a0',
'#c04020',
'#c040a0',
'#c0c020',
'#c0c0a0',
'#404060',
'#4040e0',
'#40c060',
'#40c0e0',
'#c04060',
'#c040e0',
'#c0c060',
'#00a000',
'#00a080',
'#802000',
'#802080',
'#80a000',
'#80a080',
'#0020c0',
'#00a040',
'#00a0c0',
'#802040',
'#8020c0',
'#80a040',
'#80a0c0',
'#006000',
'#006080',
'#00e000',
'#00e080',
'#806000',
'#806080',
'#80e000',
'#80e080',
'#006040',
'#0060c0',
'#00e040',
'#00e0c0',
'#806040',
'#8060c0',
'#80e040',
'#80e0c0',
'#40a000',
'#40a080',
'#c02000',
'#c02080',
'#c0a000',
'#c0a080',
'#4020c0',
'#40a040',
'#40a0c0',
'#c02040',
'#c020c0',
'#c0a040',
'#c0a0c0',
'#406000',
'#406080',
'#40e000',
'#40e080',
'#c06000',
'#c06080',
'#c0e000',
'#c0e080',
'#406040',
'#4060c0',
'#40e040',
'#40e0c0',
'#c06040',
'#c060c0',
'#c0e040',
'#c0e0c0',
'#0020a0',
'#00a020',
'#00a0a0',
'#802020',
'#8020a0',
'#80a020',
'#80a0a0',
'#0020e0',
'#00a060',
'#00a0e0',
'#802060',
'#8020e0',
'#80a060',
'#80a0e0',
'#006020',
'#0060a0',
'#00e020',
'#00e0a0',
'#806020',
'#8060a0',
'#80e020',
'#80e0a0',
'#006060',
'#0060e0',
'#00e060',
'#00e0e0',
'#806060',
'#8060e0',
'#80e060',
'#80e0e0',
'#4020a0',
'#40a020',
'#40a0a0',
'#c02020',
'#c020a0',
'#c0a020',
'#c0a0a0',
'#4020e0',
'#40a060',
'#40a0e0',
'#c02060',
'#c020e0',
'#c0a060',
'#c0a0e0',
'#406020',
'#4060a0',
'#40e020',
'#40e0a0',
'#c06020',
'#c060a0',
'#c0e020',
'#c0e0a0',
'#406060',
'#4060e0',
'#40e060',
'#40e0e0',
'#c06060',
'#c060e0',
'#c0e060',
'#208000',
'#208080',
'#a00000',
'#a00080',
'#a08000',
'#a08080',
'#208040',
'#2080c0',
'#a00040',
'#a000c0',
'#a08040',
'#a080c0',
'#204080',
'#20c000',
'#20c080',
'#a04000',
'#a04080',
'#a0c000',
'#a0c080',
'#2040c0',
'#20c040',
'#20c0c0',
'#a04040',
'#a040c0',
'#a0c040',
'#a0c0c0',
'#608000',
'#608080',
'#e00000',
'#e00080',
'#e08000',
'#e08080',
'#6000c0',
'#608040',
'#6080c0',
'#e00040',
'#e000c0',
'#e08040',
'#e080c0',
'#604080',
'#60c000',
'#60c080',
'#e04000',
'#e04080',
'#e0c000',
'#e0c080',
'#604040',
'#6040c0',
'#60c040',
'#60c0c0',
'#e04040',
'#e040c0',
'#e0c040',
'#e0c0c0',
'#208020',
'#2080a0',
'#a00020',
'#a000a0',
'#a08020',
'#a080a0',
'#2000e0',
'#208060',
'#2080e0',
'#a00060',
'#a000e0',
'#a08060',
'#a080e0',
'#2040a0',
'#20c020',
'#20c0a0',
'#a04020',
'#a040a0',
'#a0c020',
'#2040e0',
'#20c060',
'#20c0e0',
'#a04060',
'#a040e0',
'#a0c060',
'#a0c0e0',
'#6000a0',
'#608020',
'#6080a0',
'#e00020',
'#e000a0',
'#e08020',
'#e080a0',
'#6000e0',
'#608060',
'#6080e0',
'#e00060',
'#e000e0',
'#e08060',
'#e080e0',
'#604020',
'#6040a0',
'#60c020',
'#60c0a0',
'#e04020',
'#e040a0',
'#e0c020',
'#e0c0a0',
'#604060',
'#6040e0',
'#60c060',
'#60c0e0',
'#e04060',
'#e040e0',
'#e0c060',
'#e0c0e0',
'#20a000',
'#20a080',
'#a02000',
'#a02080',
'#a0a000',
'#a0a080',
'#2020c0',
'#20a040',
'#20a0c0',
'#a02040',
'#a020c0',
'#a0a040',
'#a0a0c0',
'#206000',
'#206080',
'#20e000',
'#20e080',
'#a06000',
'#a06080',
'#a0e000',
'#a0e080',
'#206040',
'#2060c0',
'#20e040',
'#20e0c0',
'#a06040',
'#a060c0',
'#a0e040',
'#a0e0c0',
'#602080',
'#60a000',
'#60a080',
'#e02000',
'#e02080',
'#e0a000',
'#e0a080',
'#6020c0',
'#60a040',
'#60a0c0',
'#e02040',
'#e020c0',
'#e0a040',
'#e0a0c0',
'#606000',
'#606080',
'#60e000',
'#60e080',
'#e06000',
'#e06080',
'#e0e000',
'#e0e080',
'#606040',
'#6060c0',
'#60e040',
'#60e0c0',
'#e06040',
'#e060c0',
'#e0e040',
'#e0e0c0',
'#20a020',
'#20a0a0',
'#a02020',
'#a020a0',
'#a0a020',
'#a0a0a0',
'#2020e0',
'#20a060',
'#20a0e0',
'#a02060',
'#a020e0',
'#a0a060',
'#a0a0e0',
'#206020',
'#2060a0',
'#20e020',
'#20e0a0',
'#a06020',
'#a060a0',
'#a0e020',
'#a0e0a0',
'#206060',
'#2060e0',
'#20e060',
'#20e0e0',
'#a06060',
'#a060e0',
'#a0e060',
'#a0e0e0',
'#6020a0',
'#60a020',
'#60a0a0',
'#e02020',
'#e020a0',
'#e0a020',
'#e0a0a0',
'#602060',
'#6020e0',
'#60a060',
'#60a0e0',
'#e02060',
'#e020e0',
'#e0a060',
'#e0a0e0',
'#606020',
'#6060a0',
'#60e020',
'#60e0a0',
'#e06020',
'#e060a0',
'#e0e020',
'#e0e0a0',
'#606060',
'#6060e0',
'#60e060',
'#60e0e0',
'#e06060',
'#e060e0',
'#e0e060',
'#008010',
'#008090',
'#800010',
'#800090',
'#808010',
'#808090',
'#0000d0',
'#008050',
'#0080d0',
'#800050',
'#8000d0',
'#808050',
'#8080d0',
'#004010',
'#004090',
'#00c010',
'#00c090',
'#804010',
'#804090',
'#80c010',
'#80c090',
'#004050',
'#0040d0',
'#00c050',
'#00c0d0',
'#804050',
'#8040d0',
'#80c050',
'#80c0d0',
'#400090',
'#408010',
'#408090',
'#c00010',
'#c00090',
'#c08010',
'#c08090',
'#4000d0',
'#408050',
'#4080d0',
'#c00050',
'#c000d0',
'#c08050',
'#c080d0',
'#404010',
'#404090',
'#40c010',
'#40c090',
'#c04010',
'#c04090',
'#c0c010',
'#c0c090',
'#404050',
'#4040d0',
'#40c050',
'#40c0d0',
'#c04050',
'#c040d0',
'#c0c050',
'#0000b0',
'#008030',
'#0080b0',
'#800030',
'#8000b0',
'#808030',
'#8080b0',
'#0000f0',
'#008070',
'#0080f0',
'#800070',
'#8000f0',
'#808070',
'#8080f0',
'#004030',
'#0040b0',
'#00c030',
'#00c0b0',
'#804030',
'#8040b0',
'#80c030',
'#80c0b0',
'#004070',
'#0040f0',
'#00c070',
'#00c0f0',
'#804070',
'#8040f0',
'#80c070',
'#80c0f0',
'#4000b0',
'#408030',
'#4080b0',
'#c00030',
'#c000b0',
'#c08030',
'#c080b0',
'#400070',
'#4000f0',
'#408070',
'#4080f0',
'#c00070',
'#c000f0',
'#c08070',
'#c080f0',
'#404030',
'#4040b0',
'#40c030',
'#40c0b0',
'#c04030',
'#c040b0',
'#c0c030',
'#c0c0b0',
'#404070',
'#4040f0',
'#40c070',
'#40c0f0',
'#c04070',
'#c040f0',
'#c0c070',
'#c0c0f0',
'#002090',
'#00a010',
'#00a090',
'#802010',
'#802090',
'#80a010',
'#80a090',
'#0020d0',
'#00a050',
'#00a0d0',
'#802050',
'#8020d0',
'#80a050',
'#80a0d0',
'#006010',
'#006090',
'#00e010',
'#00e090',
'#806010',
'#806090',
'#80e010',
'#80e090',
'#006050',
'#0060d0',
'#00e050',
'#00e0d0',
'#806050',
'#8060d0',
'#80e050',
'#80e0d0',
'#402090',
'#40a010',
'#40a090',
'#c02010',
'#c02090',
'#c0a010',
'#c0a090',
'#402050',
'#4020d0',
'#40a050',
'#40a0d0',
'#c02050',
'#c020d0',
'#c0a050',
'#c0a0d0',
'#406010',
'#406090',
'#40e010',
'#40e090',
'#c06010',
'#c06090',
'#c0e010',
'#c0e090',
'#406050',
'#4060d0',
'#40e050',
'#40e0d0',
'#c06050',
'#c060d0',
'#c0e050',
'#c0e0d0',
'#0020b0',
'#00a030',
'#00a0b0',
'#802030',
'#8020b0',
'#80a030',
'#80a0b0',
'#0020f0',
'#00a070',
'#00a0f0',
'#802070',
'#8020f0',
'#80a070',
'#80a0f0',
'#006030',
'#0060b0',
'#00e030',
'#00e0b0',
'#806030',
'#8060b0',
'#80e030',
'#80e0b0',
'#006070',
'#0060f0',
'#00e070',
'#00e0f0',
'#806070',
'#8060f0',
'#80e070',
'#80e0f0',
'#4020b0',
'#40a030',
'#40a0b0',
'#c02030',
'#c020b0',
'#c0a030',
'#c0a0b0',
'#4020f0',
'#40a070',
'#40a0f0',
'#c02070',
'#c020f0',
'#c0a070',
'#c0a0f0',
'#406030',
'#4060b0',
'#40e030',
'#40e0b0',
'#c06030',
'#c060b0',
'#c0e030',
'#c0e0b0',
'#406070',
'#4060f0',
'#40e070',
'#40e0f0',
'#c06070',
'#c060f0',
'#c0e070',
'#208010',
'#208090',
'#a00010',
'#a00090',
'#a08010',
'#a08090',
'#2000d0',
'#208050',
'#2080d0',
'#a00050',
'#a000d0',
'#a08050',
'#a080d0',
'#204010',
'#204090',
'#20c010',
'#20c090',
'#a04010',
'#a04090',
'#a0c010',
'#a0c090',
'#204050',
'#2040d0',
'#20c050',
'#20c0d0',
'#a04050',
'#a040d0',
'#a0c050',
'#a0c0d0',
'#600090',
'#608010',
'#608090',
'#e00010',
'#e00090',
'#e08010',
'#e08090',
'#600050',
'#6000d0',
'#608050',
'#6080d0',
'#e00050',
'#e000d0',
'#e08050',
'#e080d0',
'#604010',
'#604090',
'#60c010',
'#60c090',
'#e04010',
'#e04090',
'#e0c010',
'#e0c090',
'#604050',
'#6040d0',
'#60c050',
'#60c0d0',
'#e04050',
'#e040d0',
'#e0c050',
'#e0c0d0',
'#2000b0',
'#208030',
'#2080b0',
'#a00030',
'#a000b0',
'#a08030',
'#a080b0',
'#2000f0',
'#208070',
'#2080f0',
'#a00070',
'#a000f0',
'#a08070',
'#a080f0',
'#204030',
'#2040b0',
'#20c030',
'#20c0b0',
'#a04030',
'#a040b0',
'#a0c030',
'#a0c0b0',
'#204070',
'#2040f0',
'#20c070',
'#20c0f0',
'#a04070',
'#a040f0',
'#a0c070',
'#a0c0f0',
'#6000b0',
'#608030',
'#6080b0',
'#e00030',
'#e000b0',
'#e08030',
'#e080b0',
'#600070',
'#6000f0',
'#608070',
'#e00070',
'#e000f0',
'#e08070',
'#e080f0',
'#604030',
'#6040b0',
'#60c030',
'#60c0b0',
'#e04030',
'#e040b0',
'#e0c030',
'#e0c0b0',
'#604070',
'#6040f0',
'#60c070',
'#60c0f0',
'#e04070',
'#e040f0',
'#e0c070',
'#e0c0f0',
'#20a010',
'#20a090',
'#a02010',
'#a02090',
'#a0a010',
'#a0a090',
'#2020d0',
'#20a050',
'#20a0d0',
'#a02050',
'#a020d0',
'#a0a050',
'#a0a0d0',
'#206010',
'#206090',
'#20e010',
'#20e090',
'#a06010',
'#a06090',
'#a0e010',
'#a0e090',
'#206050',
'#2060d0',
'#20e050',
'#20e0d0',
'#a06050',
'#a060d0',
'#a0e050',
'#a0e0d0',
'#602090',
'#60a010',
'#60a090',
'#e02010',
'#e02090',
'#e0a010',
'#e0a090',
'#602050',
'#6020d0',
'#60a050',
'#60a0d0',
'#e02050',
'#e020d0',
'#e0a050',
'#e0a0d0',
'#606010',
'#606090',
'#60e010',
'#60e090',
'#e06010',
'#e06090',
'#e0e010',
'#e0e090',
'#606050',
'#6060d0',
'#60e050',
'#60e0d0',
'#e06050',
'#e060d0',
'#e0e050',
'#2020b0',
'#20a030',
'#20a0b0',
'#a02030',
'#a020b0',
'#a0a030',
'#a0a0b0',
'#2020f0',
'#20a070',
'#20a0f0',
'#a02070',
'#a020f0',
'#a0a070',
'#a0a0f0',
'#206030',
'#2060b0',
'#20e030',
'#20e0b0',
'#a06030',
'#a060b0',
'#a0e030',
'#a0e0b0',
'#206070',
'#2060f0',
'#20e070',
'#20e0f0',
'#a06070',
'#a060f0',
'#a0e070',
'#a0e0f0',
'#6020b0',
'#60a030',
'#60a0b0',
'#e02030',
'#e020b0',
'#e0a030',
'#e0a0b0',
'#6020f0',
'#60a070',
'#60a0f0',
'#e02070',
'#e020f0',
'#e0a070',
'#e0a0f0',
'#606030',
'#6060b0',
'#60e030',
'#60e0b0',
'#e06030',
'#e060b0',
'#e0e030',
'#e0e0b0',
'#606070',
'#6060f0',
'#60e070',
'#60e0f0',
'#e06070',
'#e060f0',
'#e0e070',
];

Some files were not shown because too many files have changed in this diff.