Merge pull request #14896 from jan--f/release-3.0-beta.0

Cut release 3.0 beta.0
Julius Volz 2024-09-10 21:39:51 +02:00 committed by GitHub
commit 4e1716610e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
130 changed files with 1113 additions and 42025 deletions


@@ -16,7 +16,9 @@ jobs:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
- uses: ./.github/promci/actions/setup_environment
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
with:
enable_npm: true
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
- run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples
@@ -110,6 +112,8 @@ jobs:
if: |
!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
&&
!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
&&
!(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
&&
!(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -130,6 +134,8 @@ jobs:
if: |
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
||
(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
||
(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
@@ -214,7 +220,10 @@ jobs:
name: Publish release artefacts
runs-on: ubuntu-latest
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
if: |
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
@@ -245,17 +254,26 @@ jobs:
restore-keys: |
${{ runner.os }}-node-
- name: Check libraries version
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
if: |
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})"
- name: build
run: make assets
- name: Copy files before publishing libs
run: ./scripts/ui_release.sh --copy
- name: Publish dry-run libraries
if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
if: |
!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
&&
!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
run: ./scripts/ui_release.sh --publish dry-run
- name: Publish libraries
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
if: |
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
run: ./scripts/ui_release.sh --publish
env:
# The setup-node action writes an .npmrc file with this env variable

.gitignore (vendored, 3 changes)

@@ -22,8 +22,7 @@ benchmark.txt
/documentation/examples/remote_storage/example_write_adapter/example_write_adapter
npm_licenses.tar.bz2
/web/ui/static/react-app
/web/ui/static/mantine-ui
/web/ui/static
/vendor
/.build


@@ -28,8 +28,6 @@ tarball:
# Whenever there are new files to include in the tarball,
# remember to make sure the new files will be generated after `make build`.
files:
- consoles
- console_libraries
- documentation/examples/prometheus.yml
- LICENSE
- NOTICE


@@ -1,10 +1,46 @@
# Changelog
## unreleased
## 3.0.0-beta.0 / 2024-09-05
Release 3.0.0-beta.0 includes new features such as a brand-new UI and UTF-8 support enabled by default. As a new major version, it introduces several breaking changes, mainly around the removal of deprecated feature flags and CLI arguments; the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.
* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-names` feature flag has been removed; a short example follows this list. #14705
* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` command-line flag instead. #14747
* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
* [FEATURE] Promtool: Allow additional labels to be added to blocks created from OpenMetrics input. #14402
* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
* [FEATURE] Automatically reload the Prometheus configuration file at a specified interval. #14769
* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
* [ENHANCEMENT] Scrape: Only parse the created timestamp if the `created-timestamp-zero-ingestion` feature flag is enabled, since parsing created timestamps in the OpenMetrics text format uses a lot of memory. #14815
* [ENHANCEMENT] Scrape: Add support for logging scrape failures to a specified file. #14734
* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
* [ENHANCEMENT] UI: Move the Alertmanager discovery page from "Monitoring status" to "Server status". #14875
* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
* [ENHANCEMENT] Add support for multiple listening addresses. #14665
* [ENHANCEMENT] Add the ability to set custom HTTP headers. #14817
* [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731
* [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. #14736
* [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147
* [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622
* [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. #14766
* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
* [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810
* [BUGFIX] Native Histograms: Do not re-use spans between histograms. #14771
* [BUGFIX] TSDB: Fix panic in query during truncation with OOO head. #14831
* [BUGFIX] TSDB: Fix panic in chunk querier. #14874
* [BUGFIX] `promql.Engine.Close`: Make it a no-op when the engine is nil. #14861
* [BUGFIX] `tsdb/wlog.Watcher.readSegmentForGC`: Only count unknown record types against the `record_decode_failures_total` metric. #14042
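With UTF-8 names now the default (see the `utf8-names` entry above), Go code built against `github.com/prometheus/common/model` sees validation behavior controlled by the package-level `model.NameValidationScheme`, which this commit sets explicitly in several `init()` functions further down in this diff. A minimal sketch of the difference between the two schemes; the metric name here is a hypothetical example, not one taken from this release:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A dotted name is valid UTF-8 but rejected by the legacy
	// pattern [a-zA-Z_:][a-zA-Z0-9_:]*.
	name := model.LabelValue("http.requests.total")

	// Prometheus 3.0 defaults to UTF-8 validation.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName(name)) // true

	// Pre-3.0 behavior without the utf8-names feature flag.
	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(model.IsValidMetricName(name)) // false
}
```

The `scrape.name-escaping-scheme` flag added later in this diff controls how such names are escaped when talking to peers that do not support UTF-8.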
## 2.54.1 / 2024-08-27


@@ -8,21 +8,16 @@ ARG OS="linux"
COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
COPY .build/${OS}-${ARCH}/promtool /bin/promtool
COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
COPY console_libraries/ /usr/share/prometheus/console_libraries/
COPY consoles/ /usr/share/prometheus/consoles/
COPY LICENSE /LICENSE
COPY NOTICE /NOTICE
COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2
WORKDIR /prometheus
RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
chown -R nobody:nobody /etc/prometheus /prometheus
RUN chown -R nobody:nobody /etc/prometheus /prometheus
USER nobody
EXPOSE 9090
VOLUME [ "/prometheus" ]
ENTRYPOINT [ "/bin/prometheus" ]
CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
"--storage.tsdb.path=/prometheus", \
"--web.console.libraries=/usr/share/prometheus/console_libraries", \
"--web.console.templates=/usr/share/prometheus/consoles" ]
"--storage.tsdb.path=/prometheus" ]


@@ -42,7 +42,7 @@ upgrade-npm-deps:
.PHONY: ui-bump-version
ui-bump-version:
version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}"
cd web/ui && npm install
git add "./web/ui/package-lock.json" "./**/package.json"


@@ -187,7 +187,7 @@ the Prometheus server, we use major version zero releases for the libraries.
Tag the new library release via the following commands:
```bash
tag="v$(sed s/2/0/ < VERSION)"
tag="v$(./scripts/get_module_version.sh)"
git tag -s "${tag}" -m "${tag}"
git push origin "${tag}"
```


@@ -1 +1 @@
2.54.1
3.0.0-beta.0


@@ -58,8 +58,6 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/legacymanager"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@@ -103,6 +101,8 @@ var (
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))
var err error
@@ -162,7 +162,6 @@ type flagConfig struct {
// These options are extracted from featureList
// for ease of use.
enableExpandExternalLabels bool
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
enableAutoGOMEMLIMIT bool
@@ -182,9 +181,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
opts := strings.Split(f, ",")
for _, o := range opts {
switch o {
case "remote-write-receiver":
c.web.EnableRemoteWriteReceiver = true
level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
case "otlp-write-receiver":
c.web.EnableOTLPWriteReceiver = true
level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
@@ -203,12 +199,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "metadata-wal-records":
c.scrape.AppendMetadata = true
level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
case "new-service-discovery-manager":
c.enableNewSDManager = true
level.Info(logger).Log("msg", "Experimental service discovery manager")
case "agent":
agentMode = true
level.Info(logger).Log("msg", "Experimental agent mode enabled.")
case "promql-per-step-stats":
c.enablePerStepStats = true
level.Info(logger).Log("msg", "Experimental per-step statistics reporting")
@@ -252,13 +242,8 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "promql-delayed-name-removal":
c.promqlEnableDelayedNameRemoval = true
level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
case "utf8-names":
model.NameValidationScheme = model.UTF8Validation
level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "old-ui":
c.web.UseOldUI = true
level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")
@@ -277,11 +262,6 @@ func main() {
runtime.SetMutexProfileFraction(20)
}
var (
oldFlagRetentionDuration model.Duration
newFlagRetentionDuration model.Duration
)
// Unregister the default GoCollector, and reregister with our defaults.
if prometheus.Unregister(collectors.NewGoCollector()) {
prometheus.MustRegister(
@@ -391,11 +371,8 @@ func main() {
"Size at which to split the tsdb WAL segment files. Example: 100MB").
Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)
serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
SetValue(&oldFlagRetentionDuration)
serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&newFlagRetentionDuration)
serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. If neither this flag nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&cfg.tsdb.RetentionDuration)
serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").
BytesVar(&cfg.tsdb.MaxBytes)
@@ -403,14 +380,6 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
Default("false").BoolVar(&cfg.tsdb.NoLockfile)
// TODO: Remove in Prometheus 3.0.
var b bool
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
Default("true").Hidden().BoolVar(&b)
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -487,9 +456,6 @@ func main() {
serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)
// TODO: Remove in Prometheus 3.0.
alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
Default("5m").SetValue(&cfg.lookbackDelta)
@@ -505,11 +471,11 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, auto-reload-config, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
promlogflag.AddFlags(a, &cfg.promlogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@@ -576,10 +542,6 @@ func main() {
os.Exit(2)
}
if *alertmanagerTimeout != "" {
level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.")
}
// Throw error for invalid config before starting other components.
var cfgFile *config.Config
if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
@@ -626,17 +588,6 @@ func main() {
cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")
if !agentMode {
// Time retention settings.
if oldFlagRetentionDuration != 0 {
level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
cfg.tsdb.RetentionDuration = oldFlagRetentionDuration
}
// When the new flag is set it takes precedence.
if newFlagRetentionDuration != 0 {
cfg.tsdb.RetentionDuration = newFlagRetentionDuration
}
if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
cfg.tsdb.RetentionDuration = defaultRetentionDuration
level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
@@ -708,8 +659,8 @@ func main() {
ctxScrape, cancelScrape = context.WithCancel(context.Background())
ctxNotify, cancelNotify = context.WithCancel(context.Background())
discoveryManagerScrape discoveryManager
discoveryManagerNotify discoveryManager
discoveryManagerScrape *discovery.Manager
discoveryManagerNotify *discovery.Manager
)
// Kubernetes client metrics are used by Kubernetes SD.
@@ -729,42 +680,16 @@ func main() {
os.Exit(1)
}
if cfg.enableNewSDManager {
{
discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1)
}
discoveryManagerScrape = discMgr
}
discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discoveryManagerScrape == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1)
}
{
discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1)
}
discoveryManagerNotify = discMgr
}
} else {
{
discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1)
}
discoveryManagerScrape = discMgr
}
{
discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1)
}
discoveryManagerNotify = discMgr
}
discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discoveryManagerNotify == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1)
}
scrapeManager, err := scrape.NewManager(
@@ -1864,15 +1789,6 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
}
}
// discoveryManager interfaces the discovery manager. This is used to keep using
// the manager that restarts SD's on reload for a few releases until we feel
// the new manager can be enabled for all users.
type discoveryManager interface {
ApplyConfig(cfg map[string]discovery.Configs) error
Run() error
SyncCh() <-chan map[string][]*targetgroup.Group
}
// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
type rwProtoMsgFlagParser struct {
msgs *[]config.RemoteWriteProtoMsg


@@ -42,6 +42,11 @@ import (
"github.com/prometheus/prometheus/rules"
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
}
const startupTime = 10 * time.Second
var (
@@ -348,7 +353,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
}
func TestAgentSuccessfulStartup(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
require.NoError(t, prom.Start())
actualExitStatus := 0
@@ -366,7 +371,7 @@ }
}
func TestAgentFailedStartupWithServerFlag(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
output := bytes.Buffer{}
prom.Stderr = &output
@@ -393,7 +398,7 @@ }
}
func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
require.NoError(t, prom.Start())
actualExitStatus := 0
@@ -431,7 +436,7 @@ func TestModeSpecificFlags(t *testing.T) {
args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
if tc.mode == "agent" {
args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
args = append(args, "--agent", "--config.file="+agentConfig)
} else {
args = append(args, "--config.file="+promConfig)
}


@@ -62,6 +62,11 @@ import (
"github.com/prometheus/prometheus/util/documentcli"
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
}
const (
successExitCode = 0
failureExitCode = 1
@@ -325,8 +330,6 @@ func main() {
noDefaultScrapePort = true
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
default:
fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
}


@@ -31,6 +31,7 @@ import (
"testing"
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
@@ -38,6 +39,11 @@ import (
"github.com/prometheus/prometheus/promql/promqltest"
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
}
var promtoolPath = os.Args[0]
func TestMain(m *testing.M) {


@@ -89,11 +89,11 @@ tests:
# Ensure lookback delta is respected, when a value is missing.
- expr: timestamp(test_missing)
eval_time: 5m
eval_time: 4m59s
exp_samples:
- value: 0
- expr: timestamp(test_missing)
eval_time: 5m1s
eval_time: 5m
exp_samples: []
# Minimal test case to check edge case of a single sample.
@@ -113,7 +113,7 @@ tests:
- expr: count_over_time(fixed_data[1h])
eval_time: 1h
exp_samples:
- value: 61
- value: 60
- expr: timestamp(fixed_data)
eval_time: 1h
exp_samples:
@@ -183,7 +183,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 1m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 1m10s
@@ -194,7 +194,7 @@ tests:
- expr: job:test:count_over_time1m
eval_time: 2m
exp_samples:
- value: 61
- value: 60
labels: 'job:test:count_over_time1m{job="test"}'
- expr: timestamp(job:test:count_over_time1m)
eval_time: 2m59s999ms
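The expected values in this test file drop from 61 to 60, and the lookback eval times shift from `5m`/`5m1s` to `4m59s`/`5m`, because range selectors and the lookback delta are now left-open (see the changelog entry for #13904): a sample that falls exactly on the lower boundary of the window is excluded. A standalone sketch of the counting arithmetic, assuming hypothetical samples scraped once per minute; this is illustrative code, not the Prometheus implementation:

```go
package main

import "fmt"

func main() {
	// Hypothetical samples at t = 0s, 60s, ..., 3600s: one per minute,
	// like the 1m-interval series in these unit tests.
	var samples []int
	for t := 0; t <= 3600; t += 60 {
		samples = append(samples, t)
	}

	evalTime, window := 3600, 3600 // count_over_time(fixed_data[1h]) at t=1h
	closed, leftOpen := 0, 0
	for _, t := range samples {
		if evalTime-window <= t && t <= evalTime { // [t-1h, t]: pre-3.0 semantics
			closed++
		}
		if evalTime-window < t && t <= evalTime { // (t-1h, t]: 3.0 semantics
			leftOpen++
		}
	}
	fmt.Println(closed, leftOpen) // 61 60
}
```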


@@ -784,10 +784,10 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
}
switch globalConfig.MetricNameValidationScheme {
case "", LegacyValidationConfig:
case UTF8ValidationConfig:
case LegacyValidationConfig:
case "", UTF8ValidationConfig:
if model.NameValidationScheme != model.UTF8Validation {
return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8")
}
default:
return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
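After this change, requesting UTF-8 name validation while the package-level `model.NameValidationScheme` is still set to legacy is treated as a programming error (a panic) rather than a user-facing config error, so the scheme must be set before any config is loaded; the new `init()` blocks in this commit do exactly that. A minimal sketch of the expected call order, reusing the four-argument `config.LoadFile` seen in the tests below; the config path is a placeholder:

```go
package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Set the scheme first: Validate now panics if a scrape config requests
	// utf8 validation while the global scheme is still legacy.
	model.NameValidationScheme = model.UTF8Validation

	// "prometheus.yml" is a placeholder; agentMode=false and
	// expandExternalLabels=false match the test calls in this commit.
	cfg, err := config.LoadFile("prometheus.yml", false, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	_ = cfg // use the loaded configuration
}
```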


@@ -62,6 +62,11 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
}
func mustParseURL(u string) *config.URL {
parsed, err := url.Parse(u)
if err != nil {
@@ -2084,6 +2089,10 @@ var expectedErrors = []struct {
}
func TestBadConfigs(t *testing.T) {
model.NameValidationScheme = model.LegacyValidation
defer func() {
model.NameValidationScheme = model.UTF8Validation
}()
for _, ee := range expectedErrors {
_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
require.Error(t, err, "%s", ee.filename)
@@ -2093,6 +2102,10 @@ func TestBadConfigs(t *testing.T) {
}
func TestBadStaticConfigsJSON(t *testing.T) {
model.NameValidationScheme = model.LegacyValidation
defer func() {
model.NameValidationScheme = model.UTF8Validation
}()
content, err := os.ReadFile("testdata/static_config.bad.json")
require.NoError(t, err)
var tg targetgroup.Group
@@ -2101,6 +2114,10 @@ func TestBadStaticConfigsJSON(t *testing.T) {
}
func TestBadStaticConfigsYML(t *testing.T) {
model.NameValidationScheme = model.LegacyValidation
defer func() {
model.NameValidationScheme = model.UTF8Validation
}()
content, err := os.ReadFile("testdata/static_config.bad.yml")
require.NoError(t, err)
var tg targetgroup.Group
@@ -2365,17 +2382,17 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
{
name: "global setting implies local settings",
inputFile: "scrape_config_global_validation_mode",
expectScheme: "utf8",
expectScheme: "legacy",
},
{
name: "local setting",
inputFile: "scrape_config_local_validation_mode",
expectScheme: "utf8",
expectScheme: "legacy",
},
{
name: "local setting overrides global setting",
inputFile: "scrape_config_local_global_validation_mode",
expectScheme: "legacy",
expectScheme: "utf8",
},
}


@@ -1,4 +1,6 @@
# Two scrape configs with the same job names are not allowed.
global:
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: prometheus
- job_name: service-x


@@ -1,3 +1,5 @@
global:
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: prometheus
relabel_configs:


@@ -1,4 +1,4 @@
global:
metric_name_validation_scheme: utf8
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: prometheus


@@ -1,5 +1,5 @@
global:
metric_name_validation_scheme: utf8
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: prometheus
metric_name_validation_scheme: legacy
metric_name_validation_scheme: utf8


@@ -1,3 +1,3 @@
scrape_configs:
- job_name: prometheus
metric_name_validation_scheme: utf8
metric_name_validation_scheme: legacy


@@ -1,82 +0,0 @@
{{/* vim: set ft=html: */}}
{{/* Navbar, should be passed . */}}
{{ define "navbar" }}
<nav class="navbar fixed-top navbar-expand-sm navbar-dark bg-dark">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggler" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false" aria-controls="navbar-nav" aria-label="toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<a class="navbar-brand" href="{{ pathPrefix }}/">Prometheus</a>
</div>
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav">
<li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/alerts">Alerts</a></li>
<li class="nav-item"><a class="nav-link" href="https://www.pagerduty.com/">PagerDuty</a></li>
</ul>
</div>
</div>
</nav>
{{ end }}
{{/* LHS menu, should be passed . */}}
{{ define "menu" }}
<div class="prom_lhs_menu row">
<nav class="col-md-2 md-block bg-dark sidebar prom_lhs_menu_nav">
<div class="sidebar-sticky">
<ul class="nav flex-column">
{{ template "_menuItem" (args . "index.html.example" "Overview") }}
{{ if query "up{job='node'}" }}
{{ template "_menuItem" (args . "node.html" "Node") }}
{{ if match "^node" .Path }}
{{ if .Params.instance }}
<ul>
<li {{ if eq .Path "node-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
<a class="nav-link" href="node-overview.html?instance={{ .Params.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</a>
</li>
<ul>
<li {{ if eq .Path "node-cpu.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
<a class="nav-link" href="node-cpu.html?instance={{ .Params.instance }}">CPU</a>
</li>
<li {{ if eq .Path "node-disk.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
<a class="nav-link" href="node-disk.html?instance={{ .Params.instance }}">Disk</a>
</li>
</ul>
</ul>
{{ end }}
{{ end }}
{{ end }}
{{ if query "up{job='prometheus'}" }}
{{ template "_menuItem" (args . "prometheus.html" "Prometheus") }}
{{ if match "^prometheus" .Path }}
{{ if .Params.instance }}
<ul>
<li {{ if eq .Path "prometheus-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
<a class="nav-link" href="prometheus-overview.html?instance={{ .Params.instance }}">{{.Params.instance }}</a>
</li>
</ul>
{{ end }}
{{ end }}
{{ end }}
</ul>
</div>
</nav>
</div>
{{ end }}
{{/* Helper, pass (args . path name) */}}
{{ define "_menuItem" }}
<li {{ if eq .arg0.Path .arg1 }} class="prom_lhs_menu_selected nav-item" {{ end }}><a class="nav-link" href="{{ .arg1 }}">{{ .arg2 }}</a></li>
{{ end }}


@@ -1,138 +0,0 @@
{{/* vim: set ft=html: */}}
{{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
{{ define "prom_console_head" }}
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
<script>
var PATH_PREFIX = "{{ pathPrefix }}";
</script>
<script src="{{ pathPrefix }}/classic/static/js/prom_console.js"></script>
{{ end }}
{{/* Top of all pages. */}}
{{ define "head" -}}
<!doctype html>
<html lang="en">
<head>
{{ template "prom_console_head" }}
</head>
<body>
{{ template "navbar" . }}
{{ template "menu" . }}
{{ end }}
{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }}
{{ define "humanize" }}{{ humanize . }}{{ end }}
{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }}
{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }}
{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }}
{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }}
{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }}
{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }}
{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }}
{{/* prom_query_drilldown (args expr suffix? renderTemplate?)
Displays the result of the expression, with a link to /graph for it.
renderTemplate is the name of the template to use to render the value.
*/}}
{{ define "prom_query_drilldown" }}
{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }}
<a class="prom_query_drilldown" href="{{ pathPrefix }}{{ graphLink $expr }}">{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }}</a>
{{ end }}
{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&amp;{{ end }}{{ end }}"
{{ define "prom_right_table_head" }}
<div class="prom_console_rhs">
<table class="table table-bordered table-hover table-sm">
{{ end }}
{{ define "prom_right_table_tail" }}
</table>
</div>
{{ end }}
{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}}
{{ define "prom_right_table_job_head" }}
<tr>
<th>{{ . }}</th>
<th>{{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }}</th>
</tr>
<tr>
<td>CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Memory</td>
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }}</td>
</tr>
{{ end }}
{{ define "prom_content_head" }}
<div class="prom_console_content">
<div class="container-fluid">
{{ template "prom_graph_timecontrol" . }}
{{ end }}
{{ define "prom_content_tail" }}
</div>
</div>
{{ end }}
{{ define "prom_graph_timecontrol" }}
<div class="prom_graph_timecontrol">
<div class="prom_graph_timecontrol_inner">
<div class="prom_graph_timecontrol_group ">
<button class="btn btn-light pull-left" type="button" id="prom_graph_duration_shrink" title="Shrink the time range.">
<i class="glyphicon glyphicon-minus"></i>
</button><!-- Comments between elements to remove spaces
--><input class="input pull-left align-middle" size="3" title="Time range of graph" type="text" id="prom_graph_duration"><!--
--><button class="btn btn-light pull-left" type="button" id="prom_graph_duration_grow" title="Grow the time range.">
<i class="glyphicon glyphicon-plus"></i>
</button>
</div>
<div class="prom_graph_timecontrol_group ">
<button class="btn btn-light pull-left" type="button" id="prom_graph_time_back" title="Rewind the end time.">
<i class="glyphicon glyphicon-backward"></i>
</button><!--
--><input class="input pull-left align-middle" title="End time of graph" placeholder="Until" type="text" id="prom_graph_time_end" size="16" value=""><!--
--><button class="btn btn-light pull-left" type="button" id="prom_graph_time_forward" title="Advance the end time.">
<i class="glyphicon glyphicon-forward"></i>
</button>
</div>
<div class="prom_graph_timecontrol_group ">
<div class="btn-group dropup prom_graph_timecontrol_refresh pull-left">
<button type="button" class="btn btn-light pull-left" id="prom_graph_refresh_button" title="Refresh.">
<i class="glyphicon glyphicon-repeat"></i>
<span class="icon-repeat"></span>
(<span id="prom_graph_refresh_button_value">Off</span>)
</button>
<button type="button" class="btn btn-light pull-left dropdown-toggle" data-toggle="dropdown" title="Set autorefresh."aria-haspopup="true" aria-expanded="false">
<span class="caret"></span>&nbsp;
</button>
<ul class="dropdown-menu" id="prom_graph_refresh_intervals" role="menu">
</ul>
</div>
</div>
</div>
<script>
new PromConsole.TimeControl();
</script>
</div>
{{ end }}
{{/* Bottom of all pages. */}}
{{ define "tail" }}
</body>
</html>
{{ end }}


@@ -1,28 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Overview</h1>
<p>These are example consoles for Prometheus.</p>
<p>These consoles expect exporters to have the following job labels:</p>
<table class="table table-sm table-striped table-bordered" style="width: 0%">
<tr>
<th>Exporter</th>
<th>Job label</th>
</tr>
<tr>
<td>Node Exporter</td>
<td><code>node</code></td>
</tr>
<tr>
<td>Prometheus</td>
<td><code>prometheus</code></td>
</tr>
</table>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,60 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }}</th>
</tr>
{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
<tr>
<td>{{ .Labels.mode | title }} CPU</td>
<td>{{ .Value | printf "%.1f" }}%</td>
</tr>
{{ end }}
<tr><th colspan="2">Misc</th></tr>
<tr>
<td>Processes Running</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Processes Blocked</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Forks</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>Context Switches</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>Interrupts</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
</tr>
<tr>
<td>1m Loadavg</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }}</td>
</tr>
<tr>
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Cores'
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,78 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<th colspan="2">{{ .Labels.device }}</th>
<tr>
<td>Utilization</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Throughput</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>Avg Read Time</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
<tr>
<td>Avg Write Time</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
<td>{{ .Labels.mountpoint }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}
<tr>
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#diskioGraph"),
expr: [
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
],
min: 0,
name: '[[ device ]]',
yUnits: "%",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Disk I/O Utilization'
})
</script>
<h3>Filesystem Usage</h3>
<div id="fsGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#fsGraph"),
expr: "100 - node_filesystem_avail_bytes{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size_bytes{job='node'} * 100",
min: 0,
max: 100,
name: '[[ mountpoint ]]',
yUnits: "%",
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Filesystem Fullness'
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,121 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr><th colspan="2">Overview</th></tr>
<tr>
<td>User CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>System CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
</tr>
<tr>
<td>Memory Total</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<td>Memory Free</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<th colspan="2">Network</th>
</tr>
{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Received</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
<tr>
<td>{{ .Labels.device }} Transmitted</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Disks</th>
</tr>
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Utilization</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
</tr>
{{ end }}
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
<tr>
<td>{{ .Labels.device }} Throughput</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
</tr>
{{ end }}
<tr>
<th colspan="2">Filesystem Fullness</th>
</tr>
{{ define "roughlyNearZero" }}
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
{{ end }}
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
<tr>
<td>{{ .Labels.mountpoint }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
</tr>
{{ end }}
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
<h3>CPU Usage</h3>
<div id="cpuGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: 'Cores'
})
</script>
<h3>Disk I/O Utilization</h3>
<div id="diskioGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#diskioGraph"),
expr: [
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
],
min: 0,
name: '[[ device ]]',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yUnits: "%",
yTitle: 'Disk I/O Utilization'
})
</script>
<h3>Memory</h3>
<div id="memoryGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#memoryGraph"),
renderer: 'area',
expr: [
"node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
"node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'}",
"node_memory_MemTotal_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Buffers_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Cached_bytes{job='node',instance='{{.Params.instance}}'}",
"node_memory_MemFree{job='node',instance='{{ .Params.instance }}'}",
],
name: ["Cached", "Buffers", "Used", "Free"],
min: 0,
yUnits: "B",
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
yTitle: 'Memory'
})
</script>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,35 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr>
<th>Node</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Node</h1>
<table class="table table-condensed table-striped table-bordered" style="width: 0%">
<tr>
<th>Node</th>
<th>Up</th>
<th>CPU<br/>Used</th>
<th>Memory<br/> Available</th>
</tr>
{{ range query "up{job='node'}" | sortByLabel "instance" }}
<tr>
<td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
<td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
</tr>
{{ else }}
<tr><td colspan=4>No nodes found.</td></tr>
{{ end }}
</table>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,96 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr>
<th colspan="2">Overview</th>
</tr>
<tr>
<td>CPU</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Memory</td>
<td>{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
</tr>
<tr>
<td>Version</td>
<td>{{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}}</td>
</tr>
<tr>
<th colspan="2">Storage</th>
</tr>
<tr>
<td>Ingested Samples</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
<tr>
<td>Head Series</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<td>Blocks Loaded</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<th colspan="2">Rules</th>
</tr>
<tr>
<td>Evaluation Duration</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
<td>Notification Latency</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
</tr>
<tr>
<td>Notification Queue</td>
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
</tr>
<tr>
<th colspan="2">HTTP Server</th>
</tr>
{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }}
<tr>
<td>{{ .Labels.handler }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
</tr>
{{ end }}
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<div class="prom_content_div">
<h1>Prometheus Overview - {{ .Params.instance }}</h1>
<h3>Ingested Samples</h3>
<div id="samplesGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#samplesGraph"),
expr: "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
name: 'Ingested Samples',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: "Samples",
yUnits: "/s",
})
</script>
<h3>HTTP Server</h3>
<div id="serverGraph"></div>
<script>
new PromConsole.Graph({
node: document.querySelector("#serverGraph"),
expr: "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
name: '[[handler]]',
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
yTitle: "Requests",
yUnits: "/s",
})
</script>
</div>
{{ template "prom_content_tail" . }}
{{ template "tail" }}


@@ -1,34 +0,0 @@
{{ template "head" . }}
{{ template "prom_right_table_head" }}
<tr>
<th>Prometheus</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}
{{ template "prom_content_head" . }}
<h1>Prometheus</h1>
<table class="table table-sm table-striped table-bordered" style="width: 0%">
<tr>
<th>Prometheus</th>
<th>Up</th>
<th>Ingested Samples</th>
<th>Memory</th>
</tr>
{{ range query "up{job='prometheus'}" | sortByLabel "instance" }}
<tr>
<td><a href="prometheus-overview.html?instance={{ .Labels.instance }}">{{ .Labels.instance }}</a></td>
<td {{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024")}}</td>
</tr>
{{ else }}
<tr><td colspan=4>No instances found.</td></tr>
{{ end }}
</table>
{{ template "prom_content_tail" . }}
{{ template "tail" }}

View file

@ -26,7 +26,6 @@ import (
"github.com/prometheus/common/model"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -103,9 +102,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
return
}
// TODO(brancz): use cache.Indexer to index endpoints by
// disv1beta1.LabelServiceName so this operation doesn't have to
// iterate over all endpoint objects.
// TODO(brancz): use cache.Indexer to index endpointslices by
// LabelServiceName so this operation doesn't have to iterate over all
// endpointslice objects.
for _, obj := range e.endpointSliceStore.List() {
esa, err := e.getEndpointSliceAdaptor(obj)
if err != nil {
@ -241,8 +240,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda
switch endpointSlice := o.(type) {
case *v1.EndpointSlice:
return newEndpointSliceAdaptorFromV1(endpointSlice), nil
case *v1beta1.EndpointSlice:
return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil
default:
return nil, fmt.Errorf("received unexpected object: %v", o)
}
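For reference, the adaptor indirection that the switch above dispatches through can be condensed to a small interface. A trimmed sketch (the full interface in endpointslice_adaptor.go also exposes addressType, endpoints, ports, and labels):

// endpointSliceAdaptor abstracts over EndpointSlice API versions so the
// discovery code stays version-agnostic; with v1beta1 gone,
// *v1.EndpointSlice is the only remaining implementation behind it.
type endpointSliceAdaptor interface {
	getObjectMeta() metav1.ObjectMeta
	name() string
	namespace() string
	labelServiceName() string
}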

View file

@ -16,7 +16,6 @@ package kubernetes
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
return v1.LabelServiceName
}
// Adaptor for k8s.io/api/discovery/v1beta1.
type endpointSliceAdaptorV1Beta1 struct {
endpointSlice *v1beta1.EndpointSlice
}
func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor {
return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice}
}
func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
return e.endpointSlice
}
func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
return e.endpointSlice.ObjectMeta
}
func (e *endpointSliceAdaptorV1Beta1) name() string {
return e.endpointSlice.Name
}
func (e *endpointSliceAdaptorV1Beta1) namespace() string {
return e.endpointSlice.Namespace
}
func (e *endpointSliceAdaptorV1Beta1) addressType() string {
return string(e.endpointSlice.AddressType)
}
func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor {
eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i]))
}
return eps
}
func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor {
ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
for i := 0; i < len(e.endpointSlice.Ports); i++ {
ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i]))
}
return ports
}
func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string {
return e.endpointSlice.Labels
}
func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string {
return v1beta1.LabelServiceName
}
type endpointSliceEndpointAdaptorV1 struct {
endpoint v1.Endpoint
}
@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
return e.endpointConditions.Terminating
}
type endpointSliceEndpointAdaptorV1beta1 struct {
endpoint v1beta1.Endpoint
}
func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor {
return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint}
}
func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string {
return e.endpoint.Addresses
}
func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
return e.endpoint.Hostname
}
func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
return e.endpoint.NodeName
}
func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
return nil
}
func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
}
func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference {
return e.endpoint.TargetRef
}
func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string {
return e.endpoint.Topology
}
type endpointSliceEndpointConditionsAdaptorV1beta1 struct {
endpointConditions v1beta1.EndpointConditions
}
func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions}
}
func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
return e.endpointConditions.Ready
}
func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
return e.endpointConditions.Serving
}
func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
return e.endpointConditions.Terminating
}
type endpointSlicePortAdaptorV1 struct {
endpointPort v1.EndpointPort
}
@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string {
func (e *endpointSlicePortAdaptorV1) appProtocol() *string {
return e.endpointPort.AppProtocol
}
type endpointSlicePortAdaptorV1beta1 struct {
endpointPort v1beta1.EndpointPort
}
func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor {
return &endpointSlicePortAdaptorV1beta1{endpointPort: port}
}
func (e *endpointSlicePortAdaptorV1beta1) name() *string {
return e.endpointPort.Name
}
func (e *endpointSlicePortAdaptorV1beta1) port() *int32 {
return e.endpointPort.Port
}
func (e *endpointSlicePortAdaptorV1beta1) protocol() *string {
val := string(*e.endpointPort.Protocol)
return &val
}
func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string {
return e.endpointPort.AppProtocol
}

View file

@ -18,7 +18,6 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
)
func Test_EndpointSliceAdaptor_v1(t *testing.T) {
@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
}
}
func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
endpointSlice := makeEndpointSliceV1beta1()
adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice)
require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
require.Equal(t, endpointSlice.Labels, adaptor.labels())
require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])
for i, endpointAdaptor := range adaptor.endpoints() {
require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
}
for i, portAdaptor := range adaptor.ports() {
require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
}
}

View file

@ -21,7 +21,6 @@ import (
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -114,62 +113,8 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
}
}
func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
return &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
Labels: map[string]string{
v1beta1.LabelServiceName: "testendpoints",
},
Annotations: map[string]string{
"test.annotation": "test",
},
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
Serving: boolptr(true),
Terminating: boolptr(false),
},
}, {
Addresses: []string{"3.4.5.6"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(false),
Serving: boolptr(true),
Terminating: boolptr(true),
},
}, {
Addresses: []string{"4.5.6.7"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
Serving: boolptr(true),
Terminating: boolptr(false),
},
TargetRef: &corev1.ObjectReference{
Kind: "Node",
Name: "barbaz",
},
},
},
}
}
func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.25.0")
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})
k8sDiscoveryTest{
discovery: n,
@ -249,71 +194,6 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
}.Run(t)
}
func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "1.20.0")
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
obj := makeEndpointSliceV1beta1()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
},
Source: "endpointslice/default/testendpoints",
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryAdd(t *testing.T) {
obj := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -353,25 +233,25 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
PodIP: "1.2.3.4",
},
}
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.20.0", obj)
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, obj)
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
obj := &v1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
AddressType: v1.AddressTypeIPv4,
Ports: []v1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
Endpoints: []v1.Endpoint{
{
Addresses: []string{"4.3.2.1"},
TargetRef: &corev1.ObjectReference{
@ -379,13 +259,13 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
Name: "testpod",
Namespace: "default",
},
Conditions: v1beta1.EndpointConditions{
Conditions: v1.EndpointConditions{
Ready: boolptr(false),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
c.DiscoveryV1().EndpointSlices(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: map[string]*targetgroup.Group{
@ -440,118 +320,34 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
}
func TestEndpointSliceDiscoveryDelete(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeEndpointSliceV1()
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
c.DiscoveryV1().EndpointSlices(obj.Namespace).Delete(context.Background(), obj.Name, metav1.DeleteOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Source: "endpointslice/default/testendpoints",
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "",
"__meta_kubernetes_endpointslice_address_target_name": "",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: map[model.LabelName]model.LabelValue{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
"__meta_kubernetes_namespace": "default",
},
},
},
}.Run(t)
}
func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{
{
Addresses: []string{"1.2.3.4"},
Hostname: strptr("testendpoint1"),
}, {
Addresses: []string{"2.3.4.5"},
Conditions: v1beta1.EndpointConditions{
Ready: boolptr(true),
},
},
},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
obj := makeEndpointSliceV1()
obj.ObjectMeta.Labels = nil
obj.ObjectMeta.Annotations = nil
obj.Endpoints = obj.Endpoints[0:2]
c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
@ -586,39 +382,11 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
"__meta_kubernetes_namespace": "default",
},
},
},
@ -626,85 +394,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
}
func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := &v1beta1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
AddressType: v1beta1.AddressTypeIPv4,
Ports: []v1beta1.EndpointPort{
{
Name: strptr("testport"),
Port: int32ptr(9000),
Protocol: protocolptr(corev1.ProtocolTCP),
},
},
Endpoints: []v1beta1.Endpoint{},
}
c.DiscoveryV1beta1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
obj := makeEndpointSliceV1()
obj.Endpoints = []v1.Endpoint{}
c.DiscoveryV1().EndpointSlices(obj.Namespace).Update(context.Background(), obj, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "1.2.3.4:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "",
"__meta_kubernetes_endpointslice_address_target_name": "",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "2.3.4.5:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "3.4.5.6:9000",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
{
"__address__": "4.5.6.7:9000",
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
"__meta_kubernetes_endpointslice_port": "9000",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
"__meta_kubernetes_endpointslice_name": "testendpoints",
@ -721,7 +422,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
}
func TestEndpointSliceDiscoveryWithService(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
discovery: n,
@ -813,7 +514,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
}
func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, "v1.21.0", makeEndpointSliceV1())
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
discovery: n,

View file

@ -24,7 +24,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -127,8 +126,6 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
switch ingress := o.(type) {
case *v1.Ingress:
ia = newIngressAdaptorFromV1(ingress)
case *v1beta1.Ingress:
ia = newIngressAdaptorFromV1beta1(ingress)
default:
level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
fmt.Errorf("received unexpected object: %v", o))

View file

@ -15,7 +15,6 @@ package kubernetes
import (
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -89,56 +88,3 @@ func (i *ingressRuleAdaptorV1) paths() []string {
}
func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
// Adaptor for networking.k8s.io/v1beta1.
type ingressAdaptorV1Beta1 struct {
ingress *v1beta1.Ingress
}
func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
return &ingressAdaptorV1Beta1{ingress: ingress}
}
func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name }
func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace }
func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels }
func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations }
func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName }
func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
var hosts []string
for _, tls := range i.ingress.Spec.TLS {
hosts = append(hosts, tls.Hosts...)
}
return hosts
}
func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor {
var rules []ingressRuleAdaptor
for _, rule := range i.ingress.Spec.Rules {
rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule))
}
return rules
}
type ingressRuleAdaptorV1Beta1 struct {
rule v1beta1.IngressRule
}
func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor {
return &ingressRuleAdaptorV1Beta1{rule: rule}
}
func (i *ingressRuleAdaptorV1Beta1) paths() []string {
rv := i.rule.IngressRuleValue
if rv.HTTP == nil {
return nil
}
paths := make([]string, len(rv.HTTP.Paths))
for n, p := range rv.HTTP.Paths {
paths[n] = p.Path
}
return paths
}
func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host }

View file

@ -20,7 +20,6 @@ import (
"github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/prometheus/prometheus/discovery/targetgroup"
@ -89,60 +88,6 @@ func makeIngress(tls TLSMode) *v1.Ingress {
return ret
}
func makeIngressV1beta1(tls TLSMode) *v1beta1.Ingress {
ret := &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "testingress",
Namespace: "default",
Labels: map[string]string{"test/label": "testvalue"},
Annotations: map[string]string{"test/annotation": "testannotationvalue"},
},
Spec: v1beta1.IngressSpec{
IngressClassName: classString("testclass"),
TLS: nil,
Rules: []v1beta1.IngressRule{
{
Host: "example.com",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{Path: "/"},
{Path: "/foo"},
},
},
},
},
{
// No backend config, ignored
Host: "nobackend.example.com",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{},
},
},
{
Host: "test.example.com",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{{}},
},
},
},
},
},
}
switch tls {
case TLSYes:
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com", "test.example.com"}}}
case TLSMixed:
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"example.com"}}}
case TLSWildcard:
ret.Spec.TLS = []v1beta1.IngressTLS{{Hosts: []string{"*.example.com"}}}
}
return ret
}
func classString(v string) *string {
return &v
}
@ -212,20 +157,6 @@ func TestIngressDiscoveryAdd(t *testing.T) {
}.Run(t)
}
func TestIngressDiscoveryAddV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSNo)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSNo),
}.Run(t)
}
func TestIngressDiscoveryAddTLS(t *testing.T) {
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
@ -240,20 +171,6 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
}.Run(t)
}
func TestIngressDiscoveryAddTLSV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSYes)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSYes),
}.Run(t)
}
func TestIngressDiscoveryAddMixed(t *testing.T) {
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
@ -268,20 +185,6 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
}.Run(t)
}
func TestIngressDiscoveryAddMixedV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"default"}}, "v1.18.0")
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
obj := makeIngressV1beta1(TLSMixed)
c.NetworkingV1beta1().Ingresses("default").Create(context.Background(), obj, metav1.CreateOptions{})
},
expectedMaxItems: 1,
expectedRes: expectedTargetGroups("default", TLSMixed),
}.Run(t)
}
func TestIngressDiscoveryNamespaces(t *testing.T) {
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
@ -303,27 +206,6 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
}.Run(t)
}
func TestIngressDiscoveryNamespacesV1beta1(t *testing.T) {
n, c := makeDiscoveryWithVersion(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}, "v1.18.0")
expected := expectedTargetGroups("ns1", TLSNo)
for k, v := range expectedTargetGroups("ns2", TLSNo) {
expected[k] = v
}
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
for _, ns := range []string{"ns1", "ns2"} {
obj := makeIngressV1beta1(TLSNo)
obj.Namespace = ns
c.NetworkingV1beta1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
}
},
expectedMaxItems: 2,
expectedRes: expected,
}.Run(t)
}
func TestIngressDiscoveryOwnNamespace(t *testing.T) {
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})

View file

@ -25,8 +25,6 @@ import (
"github.com/prometheus/prometheus/util/strutil"
disv1beta1 "k8s.io/api/discovery/v1beta1"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@ -36,12 +34,10 @@ import (
apiv1 "k8s.io/api/core/v1"
disv1 "k8s.io/api/discovery/v1"
networkv1 "k8s.io/api/networking/v1"
"k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@ -401,55 +397,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
switch d.role {
case RoleEndpointSlice:
// Check "networking.k8s.io/v1" availability with retries.
// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
var v1Supported bool
if retryOnError(ctx, 10*time.Second,
func() (err error) {
v1Supported, err = checkDiscoveryV1Supported(d.client)
if err != nil {
level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
}
return err
},
) {
d.Unlock()
return
}
for _, namespace := range namespaces {
var informer cache.SharedIndexInformer
if v1Supported {
e := d.client.DiscoveryV1().EndpointSlices(namespace)
elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.Watch(ctx, options)
},
}
informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})
} else {
e := d.client.DiscoveryV1beta1().EndpointSlices(namespace)
elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.Watch(ctx, options)
},
}
informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{})
e := d.client.DiscoveryV1().EndpointSlices(namespace)
elw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.endpointslice.field
options.LabelSelector = d.selectors.endpointslice.label
return e.Watch(ctx, options)
},
}
informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})
s := d.client.CoreV1().Services(namespace)
slw := &cache.ListWatch{
@ -609,55 +572,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
go svc.informer.Run(ctx.Done())
}
case RoleIngress:
// Check "networking.k8s.io/v1" availability with retries.
// If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility
var v1Supported bool
if retryOnError(ctx, 10*time.Second,
func() (err error) {
v1Supported, err = checkNetworkingV1Supported(d.client)
if err != nil {
level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err)
}
return err
},
) {
d.Unlock()
return
}
for _, namespace := range namespaces {
var informer cache.SharedInformer
if v1Supported {
i := d.client.NetworkingV1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
}
informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
} else {
i := d.client.NetworkingV1beta1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
}
informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
i := d.client.NetworkingV1().Ingresses(namespace)
ilw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = d.selectors.ingress.field
options.LabelSelector = d.selectors.ingress.label
return i.Watch(ctx, options)
},
}
informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
ingress := NewIngress(
log.With(d.logger, "role", "ingress"),
informer,
@ -720,20 +650,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) (
}
}
func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) {
k8sVer, err := client.Discovery().ServerVersion()
if err != nil {
return false, err
}
semVer, err := utilversion.ParseSemantic(k8sVer.String())
if err != nil {
return false, err
}
// networking.k8s.io/v1 is available since Kubernetes v1.19
// https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
return semVer.Major() >= 1 && semVer.Minor() >= 19, nil
}
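Worth noting: the removed gate compares the major and minor components in isolation, so a hypothetical v2.0 server (minor 0) would be misclassified. A sketch of the same check written with Version.AtLeast from the utilversion package imported above:

// minNetworkingV1 is the first release that serves networking.k8s.io/v1.
var minNetworkingV1 = utilversion.MustParseSemantic("v1.19.0")

// networkingV1Supported (hypothetical name) orders complete versions
// instead of comparing minors on their own.
func networkingV1Supported(gitVersion string) (bool, error) {
	semVer, err := utilversion.ParseSemantic(gitVersion)
	if err != nil {
		return false, err
	}
	return semVer.AtLeast(minNetworkingV1), nil
}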
func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
nlw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@ -834,19 +750,6 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
}
}
}
case *disv1beta1.EndpointSlice:
for _, target := range e.Endpoints {
if target.TargetRef != nil {
switch target.TargetRef.Kind {
case "Pod":
if target.NodeName != nil {
nodes = append(nodes, *target.NodeName)
}
case "Node":
nodes = append(nodes, target.TargetRef.Name)
}
}
}
default:
return nil, fmt.Errorf("object is not an endpointslice")
}
@ -882,21 +785,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb
return informer
}
func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
k8sVer, err := client.Discovery().ServerVersion()
if err != nil {
return false, err
}
semVer, err := utilversion.ParseSemantic(k8sVer.String())
if err != nil {
return false, err
}
// The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25.
// discovery.k8s.io/v1 is available since Kubernetes v1.21
// https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25
return semVer.Major() >= 1 && semVer.Minor() >= 21, nil
}
func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)

View file

@ -46,7 +46,7 @@ func TestMain(m *testing.M) {
// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
return makeDiscoveryWithVersion(role, nsDiscovery, "v1.22.0", objects...)
return makeDiscoveryWithVersion(role, nsDiscovery, "v1.25.0", objects...)
}
// makeDiscoveryWithVersion creates a kubernetes.Discovery instance with the specified kubernetes version for testing.
@ -285,40 +285,6 @@ func TestRetryOnError(t *testing.T) {
}
}
func TestCheckNetworkingV1Supported(t *testing.T) {
tests := []struct {
version string
wantSupported bool
wantErr bool
}{
{version: "v1.18.0", wantSupported: false, wantErr: false},
{version: "v1.18.1", wantSupported: false, wantErr: false},
// networking v1 is supported since Kubernetes v1.19
{version: "v1.19.0", wantSupported: true, wantErr: false},
{version: "v1.20.0-beta.2", wantSupported: true, wantErr: false},
// error patterns
{version: "", wantSupported: false, wantErr: true},
{version: "<>", wantSupported: false, wantErr: true},
}
for _, tc := range tests {
tc := tc
t.Run(tc.version, func(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: tc.version}
supported, err := checkNetworkingV1Supported(clientset)
if tc.wantErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.wantSupported, supported)
})
}
}
func TestFailuresCountMetric(t *testing.T) {
tests := []struct {
role Role

View file

@ -1,332 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package legacymanager
import (
"context"
"fmt"
"reflect"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
type poolKey struct {
setName string
provider string
}
// provider holds a Discoverer instance, its configuration and its subscribers.
type provider struct {
name string
d discovery.Discoverer
subs []string
config interface{}
}
// NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager {
if logger == nil {
logger = log.NewNopLogger()
}
mgr := &Manager{
logger: logger,
syncCh: make(chan map[string][]*targetgroup.Group),
targets: make(map[poolKey]map[string]*targetgroup.Group),
discoverCancel: []context.CancelFunc{},
ctx: ctx,
updatert: 5 * time.Second,
triggerSend: make(chan struct{}, 1),
registerer: registerer,
sdMetrics: sdMetrics,
}
for _, option := range options {
option(mgr)
}
// Register the metrics.
// We have to do this after setting all options, so that the name of the Manager is set.
if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil {
mgr.metrics = metrics
} else {
level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
return nil
}
return mgr
}
// Name sets the name of the manager.
func Name(n string) func(*Manager) {
return func(m *Manager) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.name = n
}
}
// Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name.
type Manager struct {
logger log.Logger
name string
mtx sync.RWMutex
ctx context.Context
discoverCancel []context.CancelFunc
// Some Discoverers (e.g. k8s) send only the updates for a given target group,
// so we use map[tg.Source]*targetgroup.Group to know which group to update.
targets map[poolKey]map[string]*targetgroup.Group
// providers keeps track of SD providers.
providers []*provider
// The sync channel sends the updates as a map where the key is the job value from the scrape config.
syncCh chan map[string][]*targetgroup.Group
// How long to wait before sending updates to the channel. The variable
// should only be modified in unit tests.
updatert time.Duration
// The triggerSend channel signals to the manager that new updates have been received from providers.
triggerSend chan struct{}
// A registerer for all service discovery metrics.
registerer prometheus.Registerer
metrics *discovery.Metrics
sdMetrics map[string]discovery.DiscovererMetrics
}
// Run starts the background processing.
func (m *Manager) Run() error {
go m.sender()
<-m.ctx.Done()
m.cancelDiscoverers()
return m.ctx.Err()
}
// SyncCh returns a read-only channel used by all the clients to receive target updates.
func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
return m.syncCh
}
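A hypothetical consumer of this API, assuming ctx, logger, and the sdMetrics map are constructed elsewhere:

// Sketch: start a manager and drain its sync channel.
func runDiscovery(ctx context.Context, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics) {
	mgr := NewManager(ctx, logger, prometheus.DefaultRegisterer, sdMetrics)
	go func() { _ = mgr.Run() }()
	for tsets := range mgr.SyncCh() {
		for job, groups := range tsets {
			// Each key is a scrape-config job name; its groups go to
			// whatever consumes targets, e.g. the scrape manager.
			level.Debug(logger).Log("msg", "targets updated", "job", job, "groups", len(groups))
		}
	}
}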
// ApplyConfig removes all running discovery providers and starts new ones using the provided config.
func (m *Manager) ApplyConfig(cfg map[string]discovery.Configs) error {
m.mtx.Lock()
defer m.mtx.Unlock()
for pk := range m.targets {
if _, ok := cfg[pk.setName]; !ok {
m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, pk.setName)
}
}
m.cancelDiscoverers()
m.targets = make(map[poolKey]map[string]*targetgroup.Group)
m.providers = nil
m.discoverCancel = nil
failedCount := 0
for name, scfg := range cfg {
failedCount += m.registerProviders(scfg, name)
m.metrics.DiscoveredTargets.WithLabelValues(name).Set(0)
}
m.metrics.FailedConfigs.Set(float64(failedCount))
for _, prov := range m.providers {
m.startProvider(m.ctx, prov)
}
return nil
}
// StartCustomProvider is used for sdtool. Only use this if you know what you're doing.
func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker discovery.Discoverer) {
p := &provider{
name: name,
d: worker,
subs: []string{name},
}
m.providers = append(m.providers, p)
m.startProvider(ctx, p)
}
func (m *Manager) startProvider(ctx context.Context, p *provider) {
level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
ctx, cancel := context.WithCancel(ctx)
updates := make(chan []*targetgroup.Group)
m.discoverCancel = append(m.discoverCancel, cancel)
go p.d.Run(ctx, updates)
go m.updater(ctx, p, updates)
}
func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) {
for {
select {
case <-ctx.Done():
return
case tgs, ok := <-updates:
m.metrics.ReceivedUpdates.Inc()
if !ok {
level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
return
}
for _, s := range p.subs {
m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
}
select {
case m.triggerSend <- struct{}{}:
default:
}
}
}
}
func (m *Manager) sender() {
ticker := time.NewTicker(m.updatert)
defer ticker.Stop()
for {
select {
case <-m.ctx.Done():
return
case <-ticker.C: // Some discoverers send updates too often, so we throttle them with the ticker.
select {
case <-m.triggerSend:
m.metrics.SentUpdates.Inc()
select {
case m.syncCh <- m.allGroups():
default:
m.metrics.DelayedUpdates.Inc()
level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
select {
case m.triggerSend <- struct{}{}:
default:
}
}
default:
}
}
}
}
func (m *Manager) cancelDiscoverers() {
for _, c := range m.discoverCancel {
c()
}
}
func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
m.mtx.Lock()
defer m.mtx.Unlock()
if _, ok := m.targets[poolKey]; !ok {
m.targets[poolKey] = make(map[string]*targetgroup.Group)
}
for _, tg := range tgs {
if tg != nil { // Some Discoverers send a nil target group, so we need to check for it to avoid panics.
m.targets[poolKey][tg.Source] = tg
}
}
}
func (m *Manager) allGroups() map[string][]*targetgroup.Group {
m.mtx.RLock()
defer m.mtx.RUnlock()
tSets := map[string][]*targetgroup.Group{}
n := map[string]int{}
for pkey, tsets := range m.targets {
for _, tg := range tsets {
// Even if the target group 'tg' is empty, we still need to send it to the 'Scrape manager'
// to signal that it needs to stop all scrape loops for this target set.
tSets[pkey.setName] = append(tSets[pkey.setName], tg)
n[pkey.setName] += len(tg.Targets)
}
}
for setName, v := range n {
m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
}
return tSets
}
// registerProviders returns the number of SD configs that failed to instantiate.
func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int {
var (
failed int
added bool
)
add := func(cfg discovery.Config) {
for _, p := range m.providers {
if reflect.DeepEqual(cfg, p.config) {
p.subs = append(p.subs, setName)
added = true
return
}
}
typ := cfg.Name()
d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
Logger: log.With(m.logger, "discovery", typ, "config", setName),
Metrics: m.sdMetrics[typ],
})
if err != nil {
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
failed++
return
}
m.providers = append(m.providers, &provider{
name: fmt.Sprintf("%s/%d", typ, len(m.providers)),
d: d,
config: cfg,
subs: []string{setName},
})
added = true
}
for _, cfg := range cfgs {
add(cfg)
}
if !added {
// Add an empty target group to force the refresh of the corresponding
// scrape pool and to notify the receiver that this target set has no
// current targets.
// It can happen because the combined set of SD configurations is empty
// or because we fail to instantiate all the SD configurations.
add(discovery.StaticConfig{{}})
}
return failed
}
// StaticProvider holds a list of target groups that never change.
type StaticProvider struct {
TargetGroups []*targetgroup.Group
}
// Run implements the Worker interface.
func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
// We still have to consider that the consumer exits right away in which case
// the context will be canceled.
select {
case ch <- sd.TargetGroups:
case <-ctx.Done():
}
close(ch)
}
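StaticProvider satisfies the discovery.Discoverer contract, so it can back StartCustomProvider. A hypothetical wiring, assuming a *Manager mgr and a ctx (model is github.com/prometheus/common/model):

// Sketch: expose a fixed target list through a custom provider.
sp := &StaticProvider{
	TargetGroups: []*targetgroup.Group{{
		Source:  "0",
		Targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}},
	}},
}
mgr.StartCustomProvider(ctx, "static/custom", sp)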

File diff suppressed because it is too large

View file

@ -1,261 +0,0 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package legacymanager
import (
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
configFieldPrefix = "AUTO_DISCOVERY_"
staticConfigsKey = "static_configs"
staticConfigsFieldName = configFieldPrefix + staticConfigsKey
)
var (
configNames = make(map[string]discovery.Config)
configFieldNames = make(map[reflect.Type]string)
configFields []reflect.StructField
configTypesMu sync.Mutex
configTypes = make(map[reflect.Type]reflect.Type)
emptyStructType = reflect.TypeOf(struct{}{})
configsType = reflect.TypeOf(discovery.Configs{})
)
// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling.
func RegisterConfig(config discovery.Config) {
registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config)
}
func init() {
// N.B.: static_configs is the only Config type implemented by default.
// All other types are registered at init by their implementing packages.
elemTyp := reflect.TypeOf(&targetgroup.Group{})
registerConfig(staticConfigsKey, elemTyp, discovery.StaticConfig{})
}
func registerConfig(yamlKey string, elemType reflect.Type, config discovery.Config) {
name := config.Name()
if _, ok := configNames[name]; ok {
panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
}
configNames[name] = config
fieldName := configFieldPrefix + yamlKey // Field must be exported.
configFieldNames[elemType] = fieldName
// Insert fields in sorted order.
i := sort.Search(len(configFields), func(k int) bool {
return fieldName < configFields[k].Name
})
configFields = append(configFields, reflect.StructField{}) // Add empty field at end.
copy(configFields[i+1:], configFields[i:]) // Shift fields to the right.
configFields[i] = reflect.StructField{ // Write new field in place.
Name: fieldName,
Type: reflect.SliceOf(elemType),
Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
}
}
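Concretely, registering a hypothetical config whose YAML key is file_sd_configs and whose element type is *FileSDConfig would append a synthetic field of this shape:

// Sketch of the generated struct field (names hypothetical):
reflect.StructField{
	Name: "AUTO_DISCOVERY_file_sd_configs", // configFieldPrefix + yamlKey
	Type: reflect.SliceOf(reflect.TypeOf(&FileSDConfig{})),
	Tag:  reflect.StructTag(`yaml:"file_sd_configs,omitempty"`),
}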
func getConfigType(out reflect.Type) reflect.Type {
configTypesMu.Lock()
defer configTypesMu.Unlock()
if typ, ok := configTypes[out]; ok {
return typ
}
// Initial exported fields map one-to-one.
var fields []reflect.StructField
for i, n := 0, out.NumField(); i < n; i++ {
switch field := out.Field(i); {
case field.PkgPath == "" && field.Type != configsType:
fields = append(fields, field)
default:
fields = append(fields, reflect.StructField{
Name: "_" + field.Name, // Field must be unexported.
PkgPath: out.PkgPath(),
Type: emptyStructType,
})
}
}
// Append extra config fields on the end.
fields = append(fields, configFields...)
typ := reflect.StructOf(fields)
configTypes[out] = typ
return typ
}
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
outVal := reflect.ValueOf(out)
if outVal.Kind() != reflect.Ptr {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
}
outVal = outVal.Elem()
if outVal.Kind() != reflect.Struct {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
}
outTyp := outVal.Type()
cfgTyp := getConfigType(outTyp)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
// Copy shared fields (defaults) to dynamic value.
var configs *discovery.Configs
for i, n := 0, outVal.NumField(); i < n; i++ {
if outTyp.Field(i).Type == configsType {
configs = outVal.Field(i).Addr().Interface().(*discovery.Configs)
continue
}
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
cfgVal.Field(i).Set(outVal.Field(i))
}
if configs == nil {
return fmt.Errorf("discovery: Configs field not found in type: %T", out)
}
// Unmarshal into dynamic value.
if err := unmarshal(cfgPtr.Interface()); err != nil {
return replaceYAMLTypeError(err, cfgTyp, outTyp)
}
// Copy shared fields from dynamic value.
for i, n := 0, outVal.NumField(); i < n; i++ {
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
outVal.Field(i).Set(cfgVal.Field(i))
}
var err error
*configs, err = readConfigs(cfgVal, outVal.NumField())
return err
}
func readConfigs(structVal reflect.Value, startField int) (discovery.Configs, error) {
var (
configs discovery.Configs
targets []*targetgroup.Group
)
for i, n := startField, structVal.NumField(); i < n; i++ {
field := structVal.Field(i)
if field.Kind() != reflect.Slice {
panic("discovery: internal error: field is not a slice")
}
for k := 0; k < field.Len(); k++ {
val := field.Index(k)
if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) {
key := configFieldNames[field.Type().Elem()]
key = strings.TrimPrefix(key, configFieldPrefix)
return nil, fmt.Errorf("empty or null section in %s", key)
}
switch c := val.Interface().(type) {
case *targetgroup.Group:
// Add index to the static config target groups for unique identification
// within scrape pool.
c.Source = strconv.Itoa(len(targets))
// Coalesce multiple static configs into a single static config.
targets = append(targets, c)
case discovery.Config:
configs = append(configs, c)
default:
panic("discovery: internal error: slice element is not a Config")
}
}
}
if len(targets) > 0 {
configs = append(configs, discovery.StaticConfig(targets))
}
return configs, nil
}
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
inVal := reflect.ValueOf(in)
for inVal.Kind() == reflect.Ptr {
inVal = inVal.Elem()
}
inTyp := inVal.Type()
cfgTyp := getConfigType(inTyp)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
// Copy shared fields to dynamic value.
var configs *discovery.Configs
for i, n := 0, inTyp.NumField(); i < n; i++ {
if inTyp.Field(i).Type == configsType {
configs = inVal.Field(i).Addr().Interface().(*discovery.Configs)
}
if cfgTyp.Field(i).PkgPath != "" {
continue // Field is unexported: ignore.
}
cfgVal.Field(i).Set(inVal.Field(i))
}
if configs == nil {
return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
}
if err := writeConfigs(cfgVal, *configs); err != nil {
return nil, err
}
return cfgPtr.Interface(), nil
}
func writeConfigs(structVal reflect.Value, configs discovery.Configs) error {
targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
for _, c := range configs {
if sc, ok := c.(discovery.StaticConfig); ok {
*targets = append(*targets, sc...)
continue
}
fieldName, ok := configFieldNames[reflect.TypeOf(c)]
if !ok {
return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
}
field := structVal.FieldByName(fieldName)
field.Set(reflect.Append(field, reflect.ValueOf(c)))
}
return nil
}
func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
var e *yaml.TypeError
if errors.As(err, &e) {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
}
}
return err
}
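
For orientation, a minimal sketch of how a service discovery mechanism would plug into the RegisterConfig entry point above. The package name `mysd`, the `Server` field, and the nil Discoverer are hypothetical; only `RegisterConfig`, `Config.Name`, and `NewDiscoverer` follow the interfaces used in the code shown here.

package mysd

import "github.com/prometheus/prometheus/discovery"

// Config is a hypothetical SD configuration. Registering it in init()
// makes "mysd_sd_configs" a recognized YAML key in any struct that is
// unmarshaled via UnmarshalYAMLWithInlineConfigs.
type Config struct {
	Server string `yaml:"server"`
}

func init() {
	discovery.RegisterConfig(&Config{})
}

// Name determines both the registry key and the YAML key ("mysd_sd_configs").
func (*Config) Name() string { return "mysd" }

// NewDiscoverer would construct the real Discoverer; elided in this sketch.
func (*Config) NewDiscoverer(_ discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return nil, nil
}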

View file

@ -224,7 +224,7 @@
"Args": [
"--config.file=/etc/prometheus/prometheus.yml",
"--storage.tsdb.path=/prometheus",
"--storage.tsdb.retention=24h"
"--storage.tsdb.retention.time=24h"
],
"Privileges": {
"CredentialSpec": null,

View file

@ -973,7 +973,7 @@
"Args": [
"--config.file=/etc/prometheus/prometheus.yml",
"--storage.tsdb.path=/prometheus",
"--storage.tsdb.retention=24h"
"--storage.tsdb.retention.time=24h"
],
"Privileges": {
"CredentialSpec": null,

View file

@ -33,8 +33,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. If neither this flag nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.size</code> | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.no-lockfile</code> | Do not create lockfile in data directory. Use with server mode only. | `false` |
| <code class="text-nowrap">--storage.tsdb.head-chunks-write-queue-size</code> | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` |
@ -57,8 +56,8 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, auto-reload-config, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, old-ui, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -126,9 +126,9 @@ global:
[ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons,
# and underscores.
[ metric_name_validation_scheme: <string> | default = "utf8" ]
runtime:
# Configure the Go garbage collector GOGC parameter
@ -485,10 +485,10 @@ metric_relabel_configs:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
# Specifies the validation scheme for metric and label names. Either blank or
# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full
# UTF-8 support.
[ metric_name_validation_scheme: <string> | default = "legacy" ]
# Specifies the validation scheme for metric and label names. Either blank or
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and
# underscores.
[ metric_name_validation_scheme: <string> | default = "utf8" ]
# Limit on total number of positive and negative buckets allowed in a single
# native histogram. The resolution of a histogram with more buckets will be
@ -2298,6 +2298,8 @@ The `endpointslice` role discovers targets from existing endpointslices. For eac
address referenced in the endpointslice object one target is discovered. If the endpoint is backed by a pod, all
additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.
The role requires the `discovery.k8s.io/v1` API version (available since Kubernetes v1.21).
Available meta labels:
* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
@ -2318,7 +2320,7 @@ Available meta labels:
* `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
* `__meta_kubernetes_endpointslice_endpoint_hostname`: Hostname of the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_node_name`: Name of the Node hosting the referenced endpoint.
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in (only available when using the `discovery.k8s.io/v1` API group).
* `__meta_kubernetes_endpointslice_endpoint_zone`: Zone the referenced endpoint exists in.
* `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
* `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
@ -2331,6 +2333,8 @@ The `ingress` role discovers a target for each path of each ingress.
This is generally useful for blackbox monitoring of an ingress.
The address will be set to the host specified in the ingress spec.
The role requires the `networking.k8s.io/v1` API version (available since Kubernetes v1.19).
Available meta labels:
* `__meta_kubernetes_namespace`: The namespace of the ingress object.

View file

@ -20,14 +20,6 @@ values according to the values of the current environment variables. References
to undefined variables are replaced by the empty string.
The `$` character can be escaped by using `$$`.
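For example, with an environment variable `HOSTNAME=web1` (a hypothetical value), a configured string `${HOSTNAME}-prod` expands to `web1-prod`, while `$${HOSTNAME}` is kept as the literal text `${HOSTNAME}`.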
## Remote Write Receiver
`--enable-feature=remote-write-receiver`
The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).
Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus.
## Exemplars storage
`--enable-feature=exemplar-storage`
@ -55,20 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow
to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit, use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.
## New service discovery manager
`--enable-feature=new-service-discovery-manager`
When enabled, Prometheus uses a new service discovery manager that does not
restart unchanged discoveries upon reloading. This makes reloads faster and reduces
pressure on service discoveries' sources.
Users are encouraged to test the new service discovery manager and report any
issues upstream.
In future releases, this new service discovery manager will become the default and
this feature flag will be ignored.
## Prometheus agent
`--enable-feature=agent`
@ -264,14 +242,6 @@ When enabled, Prometheus will change the way in which the `__name__` label is re
This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
## UTF-8 Name Support
`--enable-feature=utf8-names`
When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set.
By itself, this flag does not enable the request of UTF-8 names via content negotiation.
Users will also have to set `metric_name_validation_scheme` in the scrape configuration to enable the feature, either in the global config or on a per-scrape-config basis.
## Auto Reload Config
`--enable-feature=auto-reload-config`

View file

@ -203,12 +203,12 @@ Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a [time
duration](#time-durations) is appended in square brackets (`[]`) at the end of
a vector selector to specify how far back in time values should be fetched for
each resulting range vector element. The range is a closed interval,
i.e. samples with timestamps coinciding with either boundary of the range are
still included in the selection.
each resulting range vector element. The range is a left-open and right-closed interval,
i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
while samples coinciding with the right boundary of the range are included in the selection.
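For example, `http_requests_total[1m]` evaluated at 12:05:00 selects samples with timestamps strictly after 12:04:00, up to and including 12:05:00.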
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
In this example, we select all the values recorded less than 5m ago for all time series
that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:
http_requests_total{job="prometheus"}[5m]
@ -358,7 +358,7 @@ independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by taking the newest sample before this timestamp within the lookback period.
series. It does so by taking the newest sample that is less than the lookback period ago.
The lookback period is 5 minutes by default.
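For example, with the default 5-minute lookback, an instant query at 12:05:00 can use a sample stamped 12:00:00.001, but not one stamped exactly 12:00:00, since that sample is precisely the lookback period ago.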
If a target scrape or rule evaluation no longer returns a sample for a time

View file

@ -87,10 +87,9 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:
- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. If neither
this flag nor `storage.tsdb.retention.size` is set, the retention time defaults to
`15d`. Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
@ -98,7 +97,6 @@ Prometheus has several flags that configure local storage. The most important ar
chunks are counted in the total size. So the minimum requirement for the disk is the
peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head`
(m-mapped Head chunks) directory combined (peaks every 2 hours).
- `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`.
- `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL).
Depending on your data, you can expect the WAL size to be halved with little extra
CPU load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0.
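As an illustrative combination of the retention flags above, `prometheus --storage.tsdb.retention.time=30d --storage.tsdb.retention.size=512MB` would delete the oldest blocks once either 30 days of data or 512MB of blocks is exceeded, whichever limit is reached first.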

View file

@ -909,11 +909,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path
}
if evalRange == 0 {
start -= durationMilliseconds(s.LookbackDelta)
// Reduce the start by one fewer ms than the lookback delta
// because we want to exclude samples that are precisely the
// lookback delta before the eval time.
start -= durationMilliseconds(s.LookbackDelta) - 1
} else {
// For all matrix queries we want to ensure that we have (end-start) + range selected
// this way we have `range` data before the start time
start -= durationMilliseconds(evalRange)
// For all matrix queries we want to ensure that we have
// (end-start) + range selected; this way we have `range` data
// before the start time. We subtract one from the range to
// exclude samples positioned directly at the lower boundary of
// the range.
start -= durationMilliseconds(evalRange) - 1
}
offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
@ -2087,7 +2093,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co
seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series))
for i, s := range vs.Series {
it := s.Iterator(nil)
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1)
}
return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
@ -2149,7 +2155,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no
if valueType == chunkenc.ValNone || t > refTime {
var ok bool
t, v, h, ok = it.PeekPrev()
if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) {
if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) {
return 0, 0, nil, false
}
}
@ -2283,20 +2289,20 @@ func (ev *evaluator) matrixIterSlice(
mintFloats, mintHistograms := mint, mint
// First floats...
if len(floats) > 0 && floats[len(floats)-1].T >= mint {
if len(floats) > 0 && floats[len(floats)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; floats[drop].T < mint; drop++ {
for drop = 0; floats[drop].T <= mint; drop++ {
}
ev.currentSamples -= drop
copy(floats, floats[drop:])
floats = floats[:len(floats)-drop]
// Only append points with timestamps after the last timestamp we have.
mintFloats = floats[len(floats)-1].T + 1
mintFloats = floats[len(floats)-1].T
} else {
ev.currentSamples -= len(floats)
if floats != nil {
@ -2305,14 +2311,14 @@ func (ev *evaluator) matrixIterSlice(
}
// ...then the same for histograms. TODO(beorn7): Use generics?
if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint {
if len(histograms) > 0 && histograms[len(histograms)-1].T > mint {
// There is an overlap between previous and current ranges, retain common
// points. In most such cases:
// (a) the overlap is significantly larger than the eval step; and/or
// (b) the number of samples is relatively small.
// so a linear search will be as fast as a binary search.
var drop int
for drop = 0; histograms[drop].T < mint; drop++ {
for drop = 0; histograms[drop].T <= mint; drop++ {
}
// Rotate the buffer around the drop index so that points before mint can be
// reused to store new histograms.
@ -2323,7 +2329,7 @@ func (ev *evaluator) matrixIterSlice(
histograms = histograms[:len(histograms)-drop]
ev.currentSamples -= totalHPointSize(histograms)
// Only append points with timestamps after the last timestamp we have.
mintHistograms = histograms[len(histograms)-1].T + 1
mintHistograms = histograms[len(histograms)-1].T
} else {
ev.currentSamples -= totalHPointSize(histograms)
if histograms != nil {
@ -2347,7 +2353,7 @@ loop:
case chunkenc.ValFloatHistogram, chunkenc.ValHistogram:
t := buf.AtT()
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintHistograms {
if t > mintHistograms {
if histograms == nil {
histograms = getMatrixSelectorHPoints()
}
@ -2373,7 +2379,7 @@ loop:
continue loop
}
// Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats {
if t > mintFloats {
ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
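
To make the off-by-one arithmetic in these hunks concrete, here is a small self-contained Go sketch; `selectorStart` and the millisecond values are illustrative, not the engine's actual API.

package main

import "fmt"

// selectorStart mirrors the left-open interval change above: the hint
// start moves forward by 1ms so that samples at exactly evalTs-rangeMs
// (or evalTs-lookbackMs) are excluded. All values are Unix milliseconds.
func selectorStart(evalTs, rangeMs, lookbackMs int64) int64 {
	if rangeMs == 0 {
		// Instant selector: look back lookbackMs, left-open.
		return evalTs - lookbackMs + 1
	}
	// Range selector: cover rangeMs of data, left-open.
	return evalTs - rangeMs + 1
}

func main() {
	// foo[2m] at t=200000ms: start hint 80001, as in the updated tests.
	fmt.Println(selectorStart(200000, 120000, 0))
	// Instant selector at t=10000ms with the 5s lookback delta the test
	// engine appears to use: start hint 5001.
	fmt.Println(selectorStart(10000, 0, 5000))
}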

View file

@ -321,271 +321,271 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
{
query: "foo", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000},
{Start: 5001, End: 10000},
},
}, {
query: "foo @ 15", start: 10000,
expected: []*storage.SelectHints{
{Start: 10000, End: 15000},
{Start: 10001, End: 15000},
},
}, {
query: "foo @ 1", start: 10000,
expected: []*storage.SelectHints{
{Start: -4000, End: 1000},
{Start: -3999, End: 1000},
},
}, {
query: "foo[2m]", start: 200000,
expected: []*storage.SelectHints{
{Start: 80000, End: 200000, Range: 120000},
{Start: 80001, End: 200000, Range: 120000},
},
}, {
query: "foo[2m] @ 180", start: 200000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000},
{Start: 60001, End: 180000, Range: 120000},
},
}, {
query: "foo[2m] @ 300", start: 200000,
expected: []*storage.SelectHints{
{Start: 180000, End: 300000, Range: 120000},
{Start: 180001, End: 300000, Range: 120000},
},
}, {
query: "foo[2m] @ 60", start: 200000,
expected: []*storage.SelectHints{
{Start: -60000, End: 60000, Range: 120000},
{Start: -59999, End: 60000, Range: 120000},
},
}, {
query: "foo[2m] offset 2m", start: 300000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000},
{Start: 60001, End: 180000, Range: 120000},
},
}, {
query: "foo[2m] @ 200 offset 2m", start: 300000,
expected: []*storage.SelectHints{
{Start: -40000, End: 80000, Range: 120000},
{Start: -39999, End: 80000, Range: 120000},
},
}, {
query: "foo[2m:1s]", start: 300000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Step: 1000},
{Start: 175001, End: 300000, Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s])", start: 300000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
expected: []*storage.SelectHints{
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
expected: []*storage.SelectHints{
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 165000, End: 290000, Func: "count_over_time", Step: 1000},
{Start: 165001, End: 290000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 155000, End: 280000, Func: "count_over_time", Step: 1000},
{Start: 155001, End: 280000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
expected: []*storage.SelectHints{
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
},
}, {
query: "foo", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: 5000, End: 20000, Step: 1000},
{Start: 5001, End: 20000, Step: 1000},
},
}, {
query: "foo @ 15", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: 10000, End: 15000, Step: 1000},
{Start: 10001, End: 15000, Step: 1000},
},
}, {
query: "foo @ 1", start: 10000, end: 20000,
expected: []*storage.SelectHints{
{Start: -4000, End: 1000, Step: 1000},
{Start: -3999, End: 1000, Step: 1000},
},
}, {
query: "rate(foo[2m] @ 180)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000},
{Start: 60001, End: 180000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] @ 300)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000},
{Start: 180001, End: 300000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] @ 60)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000},
{Start: -59999, End: 60000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m])", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000},
{Start: 80001, End: 500000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000},
{Start: 60001, End: 380000, Range: 120000, Func: "rate", Step: 1000},
},
}, {
query: "rate(foo[2m:1s])", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 500000, Func: "rate", Step: 1000},
{Start: 175001, End: 500000, Func: "rate", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 500000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 500000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 165000, End: 490000, Func: "count_over_time", Step: 1000},
{Start: 165001, End: 490000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
{Start: 175001, End: 300000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
{Start: 75001, End: 200000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000,
expected: []*storage.SelectHints{
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
{Start: -24999, End: 100000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 155000, End: 480000, Func: "count_over_time", Step: 1000},
{Start: 155001, End: 480000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
// When the @ is on the vector selector, the enclosing subquery parameters
// don't affect the hint ranges.
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
{Start: 185001, End: 190000, Func: "count_over_time", Step: 1000},
},
}, {
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
expected: []*storage.SelectHints{
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
{Start: -44999, End: 80000, Func: "count_over_time", Step: 1000},
},
}, {
query: "sum by (dim1) (foo)", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
{Start: 5001, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
},
}, {
query: "sum without (dim1) (foo)", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
{Start: 5001, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
},
}, {
query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000,
expected: []*storage.SelectHints{
{Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000},
{Start: 9001, End: 10000, Func: "avg_over_time", Range: 1000},
},
}, {
query: "sum by (dim1) (max by (dim2) (foo))", start: 10000,
expected: []*storage.SelectHints{
{Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
{Start: 5001, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
},
}, {
query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
expected: []*storage.SelectHints{
{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
{Start: 1, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
},
}, {
query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
expected: []*storage.SelectHints{
{Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000},
{Start: 95000, End: 120000, Func: "max", By: true, Step: 5000},
{Start: 95001, End: 120000, Func: "sum", By: true, Step: 5000},
{Start: 95001, End: 120000, Func: "max", By: true, Step: 5000},
},
}, {
query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 45000, End: 50000, Step: 1000},
{Start: 245000, End: 250000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 45001, End: 50000, Step: 1000},
{Start: 245001, End: 250000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 45000, End: 50000, Step: 1000},
{Start: 95000, End: 500000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 45001, End: 50000, Step: 1000},
{Start: 95001, End: 500000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000},
{Start: 245000, End: 250000, Step: 1000},
{Start: 895000, End: 900000, Step: 1000},
{Start: 48001, End: 50000, Step: 1000, Func: "rate", Range: 2000},
{Start: 245001, End: 250000, Step: 1000},
{Start: 895001, End: 900000, Step: 1000},
},
}, {
query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
{Start: 95000, End: 500000, Step: 1000},
{Start: 95000, End: 500000, Step: 1000},
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
{Start: 95001, End: 500000, Step: 1000},
{Start: 95001, End: 500000, Step: 1000},
},
}, {
query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000,
expected: []*storage.SelectHints{
{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
{Start: 95000, End: 500000, Step: 1000},
{Start: 655000, End: 780000, Step: 1000, Func: "rate"},
{Start: 43001, End: 50000, Step: 1000, Func: "rate"},
{Start: 95001, End: 500000, Step: 1000},
{Start: 655001, End: 780000, Step: 1000, Func: "rate"},
},
}, { // Hints are based on the inner most subquery timestamp.
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
expected: []*storage.SelectHints{
{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
{Start: -149999, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
},
}, { // Hints are based on the inner most subquery timestamp.
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
expected: []*storage.SelectHints{
{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
{Start: 2800001, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
},
},
} {
@ -935,22 +935,20 @@ load 10s
},
},
{
Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]",
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 10,
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples
// as if we run a query on 00 looking back 60 seconds we will return 7 samples;
// see next test).
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 4
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 24,
},
},
{
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Query: "max_over_time(metricWith1SampleEvery10Seconds[61s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 11,
TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
// max_over_time(metricWith1SampleEvery10Seconds[61s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 26,
},
@ -959,10 +957,9 @@ load 10s
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 78,
TotalSamples: 338, // (1 histogram (size 13 HPoint) / 10 seconds * 60 seconds) * 4 + 2 * 13 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamples: 312, // (1 histogram (size 13) / 10 seconds * 60 seconds) * 4
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 338,
201000: 312,
},
},
{
@ -1427,23 +1424,23 @@ load 10s
},
{
// The peak samples in memory is during the first evaluation:
// - Subquery takes 22 samples, 11 for each bigmetric,
// - Result is calculated per series where the series samples is buffered, hence 11 more here.
// - Subquery takes 22 samples, 11 for each bigmetric, but samples on the left bound won't be evaluated.
// - Result is calculated per series where the series samples is buffered, hence 10 more here.
// - The result of two series is added before the last series buffer is discarded, so 2 more here.
// Hence at peak it is 22 (subquery) + 11 (buffer of a series) + 2 (result from 2 series).
// Hence at peak it is 22 (subquery) + 10 (buffer of a series) + 2 (result from 2 series).
// The subquery samples and the buffer is discarded before duplicating.
Query: `rate(bigmetric[10s:1s] @ 10)`,
MaxSamples: 35,
MaxSamples: 34,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
},
{
// Here the reasoning is same as above. But LHS and RHS are done one after another.
// So while one of them takes 35 samples at peak, we need to hold the 2 sample
// So while one of them takes 34 samples at peak, we need to hold the 2 sample
// result of the other till then.
Query: `rate(bigmetric[10s:1s] @ 10) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 37,
MaxSamples: 36,
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Interval: 5 * time.Second,
@ -1452,20 +1449,20 @@ load 10s
// promql.Sample as above but with only 1 part as step invariant.
// Here the peak is caused by the non-step invariant part as it touches more time range.
// Hence at peak it is 2*21 (subquery from 0s to 20s)
// + 11 (buffer of a series per evaluation)
// + 10 (buffer of a series per evaluation)
// + 6 (result from 2 series at 3 eval times).
Query: `rate(bigmetric[10s:1s]) + rate(bigmetric[10s:1s] @ 30)`,
MaxSamples: 59,
MaxSamples: 58,
Start: time.Unix(10, 0),
End: time.Unix(20, 0),
Interval: 5 * time.Second,
},
{
// Nested subquery.
// We saw that innermost rate takes 35 samples which is still the peak
// We saw that innermost rate takes 34 samples which is still the peak
// since the other two subqueries just duplicate the result.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[100s:20s] @ 2000`,
MaxSamples: 35,
MaxSamples: 34,
Start: time.Unix(10, 0),
},
{
@ -1579,11 +1576,11 @@ load 1ms
start: 10,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}},
Floats: []promql.FPoint{{F: 29, T: 290000}, {F: 30, T: 300000}},
Metric: lbls1,
},
promql.Series{
Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}},
Floats: []promql.FPoint{{F: 58, T: 290000}, {F: 60, T: 300000}},
Metric: lbls2,
},
},
@ -1592,7 +1589,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}},
Floats: []promql.FPoint{{F: 2, T: -1000}, {F: 1, T: 0}},
Metric: lblsneg,
},
},
@ -1601,7 +1598,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
Floats: []promql.FPoint{{F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}},
Metric: lblsneg,
},
},
@ -1610,7 +1607,7 @@ load 1ms
start: 100,
result: promql.Matrix{
promql.Series{
Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
Floats: []promql.FPoint{{F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}},
Metric: lblsms,
},
},
@ -3060,7 +3057,7 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
ts time.Time
}{
"matches series with points in range": {
expr: "some_metric[1m]",
expr: "some_metric[2m]",
ts: baseT.Add(2 * time.Minute),
expected: promql.Matrix{
{
@ -3096,7 +3093,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
{
Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
Floats: []promql.FPoint{
{T: timestamp.FromTime(baseT), F: 0},
{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
},
@ -3118,6 +3114,217 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) {
}
}
func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
cases := []struct {
histograms []histogram.Histogram
expected histogram.FloatHistogram
expectedAvg histogram.FloatHistogram
}{
{
histograms: []histogram.Histogram{
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 25,
Sum: 1234.5,
ZeroThreshold: 0.001,
ZeroCount: 4,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
NegativeBuckets: []int64{2, 2, -3, 8},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 2345.6,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 41,
Sum: 1111.1,
ZeroThreshold: 0.001,
ZeroCount: 5,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 1, Length: 4},
{Offset: 2, Length: 0},
{Offset: 2, Length: 3},
},
NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3},
},
{
CounterResetHint: histogram.GaugeType,
Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers.
},
},
expected: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 14,
Count: 107,
Sum: 4691.2,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4},
},
expectedAvg: histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
ZeroThreshold: 0.001,
ZeroCount: 3.5,
Count: 26.75,
Sum: 1172.8,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 7},
},
PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 6},
{Offset: 3, Length: 3},
},
NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1},
},
},
}
idx0 := int64(0)
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
seriesName := "sparse_histogram_series"
seriesNameOverTime := "sparse_histogram_series_over_time"
engine := newTestEngine(t)
ts := idx0 * int64(10*time.Minute/time.Millisecond)
app := storage.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
require.NoError(t, err)
for idx1, h := range c.histograms {
lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
// Since we mutate h later, we need to create a copy here.
var err error
if floatHisto {
_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
}
require.NoError(t, err)
lbls = labels.FromStrings("__name__", seriesNameOverTime)
newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond)
// Since we mutate h later, we need to create a copy here.
if floatHisto {
_, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil)
}
require.NoError(t, err)
}
require.NoError(t, app.Commit())
queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Empty(t, res.Warnings)
vector, err := res.Vector()
require.NoError(t, err)
testutil.RequireEqual(t, exp, vector)
}
queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) {
qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
require.NoError(t, err)
res := qry.Exec(context.Background())
require.NoError(t, res.Err)
require.Equal(t, expWarnings, res.Warnings)
}
// sum().
queryString := fmt.Sprintf("sum(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
queryString = `sum({idx="0"})`
var annos annotations.Annotations
annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13}))
queryAndCheckAnnotations(queryString, ts, annos)
// + operator.
queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName)
for idx := 1; idx < len(c.histograms); idx++ {
queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
}
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
// count().
queryString = fmt.Sprintf("count(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
// avg().
queryString = fmt.Sprintf("avg(%s)", seriesName)
queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
offset := int64(len(c.histograms) - 1)
newTs := ts + offset*int64(time.Minute/time.Millisecond)
// sum_over_time().
queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels(), DropName: true}})
// avg_over_time().
queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset+1)
queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels(), DropName: true}})
})
idx0++
}
}
}
func TestNativeHistogram_SubOperator(t *testing.T) {
// TODO(codesome): Integrate histograms into the PromQL testing framework
// and write more tests there.
@ -3370,43 +3577,43 @@ metric 0 1 2
}{
{
name: "default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta),
ts: lastDatapointTs.Add(defaultLookbackDelta - time.Millisecond),
expectSamples: true,
},
{
name: "outside default lookback delta",
ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
ts: lastDatapointTs.Add(defaultLookbackDelta),
expectSamples: false,
},
{
name: "custom engine lookback delta",
ts: lastDatapointTs.Add(10 * time.Minute),
ts: lastDatapointTs.Add(10*time.Minute - time.Millisecond),
engineLookback: 10 * time.Minute,
expectSamples: true,
},
{
name: "outside custom engine lookback delta",
ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond),
ts: lastDatapointTs.Add(10 * time.Minute),
engineLookback: 10 * time.Minute,
expectSamples: false,
},
{
name: "custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,
},
{
name: "outside custom query lookback delta",
ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond),
ts: lastDatapointTs.Add(20 * time.Minute),
engineLookback: 10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: false,
},
{
name: "negative custom query lookback delta",
ts: lastDatapointTs.Add(20 * time.Minute),
ts: lastDatapointTs.Add(20*time.Minute - time.Millisecond),
engineLookback: -10 * time.Minute,
queryLookback: 20 * time.Minute,
expectSamples: true,
@ -3473,18 +3680,18 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:
}
}
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[90s])", time.Unix(0, 0), time.Unix(0, 0).Add(60*time.Second), time.Minute)
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{
Count: 2,
Sum: 2, // Increase from 4 to 6 is 2.
Count: 3,
Sum: 3, // Increase from 4 to 6 is 2. Interpolation adds 1.
PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
PositiveBuckets: []float64{1.5, 1.5}, // Increase from 2 to 3 is 1 in both buckets. Interpolation adds 0.5.
},
})
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute))
qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[61s]", time.Unix(0, 0).Add(2*time.Minute))
require.NoError(t, err)
verify(t, qry, []histogram.FloatHistogram{
{

View file

@ -237,7 +237,7 @@ eval instant at 5m sum by (group) (http_requests)
load 5m
testmetric {{}}
eval instant at 5m testmetric
eval instant at 0m testmetric
`,
expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`,
},

View file

@ -250,7 +250,7 @@ clear
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="app-server", instance="0", group="production"} 0+50x10
@ -337,32 +337,32 @@ load 5m
version{job="app-server", instance="0", group="canary"} 7
version{job="app-server", instance="1", group="canary"} 7
eval instant at 5m count_values("version", version)
eval instant at 1m count_values("version", version)
{version="6"} 5
{version="7"} 2
{version="8"} 2
eval instant at 5m count_values(((("version"))), version)
eval instant at 1m count_values(((("version"))), version)
{version="6"} 5
{version="7"} 2
{version="8"} 2
eval instant at 5m count_values without (instance)("version", version)
eval instant at 1m count_values without (instance)("version", version)
{job="api-server", group="production", version="6"} 3
{job="api-server", group="canary", version="8"} 2
{job="app-server", group="production", version="6"} 2
{job="app-server", group="canary", version="7"} 2
# Overwrite label with output. Don't do this.
eval instant at 5m count_values without (instance)("job", version)
eval instant at 1m count_values without (instance)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2
# Overwrite label with output. Don't do this.
eval instant at 5m count_values by (job, group)("job", version)
eval instant at 1m count_values by (job, group)("job", version)
{job="6", group="production"} 5
{job="8", group="canary"} 2
{job="7", group="canary"} 2

View file

@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Inner most sum=1+2+...+10=55.
# With [100s:25s] subquery, it's 55*5.
# With [100s:25s] subquery, it's 55*4.
eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)
{job="1"} 275
{job="1"} 220
# Nested subqueries with different timestamps on both.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.
# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times.
# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000)
{job="1"} 1100
{job="1"} 660
# Testing the inner subquery timestamp since vector selector does not have @.
# Inner sum for subquery [100s:25s] @ 50 are
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9.
# This sum of 11 is repeated 4 times by outer subquery.
# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5.
# This sum of 7 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200)
{job="1"} 44
{job="1"} 21
# Inner sum for subquery [100s:25s] @ 200 are
# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20.
# This sum of 116 is repeated 4 times by outer subquery.
# at 125=12, at 150=15, at 175=17, at 200=20.
# This sum of 64 is repeated 3 times by outer subquery.
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50)
{job="1"} 464
{job="1"} 192
# Nested subqueries with timestamp only on outer subquery.
# Outer most subquery:
# at 900=783
# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87
# at 925=537
# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91
# at 950=828
# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92
# at 975=567
# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95
# at 1000=873
# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97
# at 925=360
# inner subquery: at 905=90+89, at 915=91+90
# at 950=372
# inner subquery: at 930=93+92, at 940=94+93
# at 975=380
# inner subquery: at 955=95+94, at 965=96+95
# at 1000=392
# inner subquery: at 980=98+97, at 990=99+98
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000)
{job="1"} 3588
{job="1"} 1504
# minute is counted on the value of the sample.
eval instant at 10s minute(metric @ 1500)
@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10))
# minute is counted on the value of the sample.
eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s])
{job="1"} 22
{job="2"} 55
{job="1"} 20
{job="2"} 50
# If nothing passed, minute() takes eval time.
# Here the eval time is determined by the subquery.
# [50m:1m] at 6000, i.e. 100m, is 50m to 100m.
# sum=50+51+52+...+59+0+1+2+...+40.
# sum=51+52+...+59+0+1+2+...+40.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000)
{} 1315
# sum=46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1365
# sum=45+46+47+...+59+0+1+2+...+35.
eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m)
{} 1410
# time() is the eval time which is determined by subquery here.
# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2.
# 2901+...+3000 = (3000*3001 - 2899*2900)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000)
{} 297950
{} 295050
# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2.
# 2301+...+2400 = (2400*2401 - 2299*2300)/2.
eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s)
{} 237350
{} 235050
# timestamp() takes the time of the sample and not the evaluation time.
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000)
{job="1"} 110
{job="1"} 100
# The result of inner timestamp() will have the timestamp as the
# eval time, hence entire expression is not step invariant and depends on eval time.

View file

@ -6,11 +6,13 @@ load 5m
# Tests for resets().
eval instant at 50m resets(http_requests[5m])
eval instant at 50m resets(http_requests[10m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[300])
eval instant at 50m resets(http_requests[600])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m])
{path="/biz"} 0
eval instant at 50m resets(http_requests[30m])
{path="/foo"} 1
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[32m])
{path="/foo"} 2
{path="/bar"} 1
{path="/biz"} 0
@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m])
# Tests for changes().
eval instant at 50m changes(http_requests[5m])
eval instant at 50m changes(http_requests[6m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m changes(http_requests[20m])
{path="/foo"} 3
{path="/bar"} 3
{path="/foo"} 2
{path="/bar"} 2
{path="/biz"} 0
eval instant at 50m changes(http_requests[30m])
{path="/foo"} 4
{path="/bar"} 5
{path="/biz"} 1
{path="/foo"} 3
{path="/bar"} 4
{path="/biz"} 0
eval instant at 50m changes(http_requests[50m])
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes((http_requests[50m]))
{path="/foo"} 8
{path="/bar"} 9
{path="/foo"} 7
{path="/bar"} 8
{path="/biz"} 1
eval instant at 50m changes(nonexistent_metric[50m])
@ -66,7 +75,7 @@ load 5m
x{a="b"} NaN NaN NaN
x{a="c"} 0 NaN 0
eval instant at 15m changes(x[15m])
eval instant at 15m changes(x[20m])
{a="b"} 0
{a="c"} 2
@ -75,14 +84,14 @@ clear
# Tests for increase().
load 5m
http_requests{path="/foo"} 0+10x10
http_requests{path="/bar"} 0+10x5 0+10x5
http_requests{path="/bar"} 0+18x5 0+18x5
http_requests{path="/dings"} 10+10x10
http_requests{path="/bumms"} 1+10x10
# Tests for increase().
eval instant at 50m increase(http_requests[50m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 160
{path="/dings"} 100
{path="/bumms"} 100
@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m])
# value, and therefore the extrapolation happens only by 30s.
eval instant at 50m increase(http_requests[100m])
{path="/foo"} 100
{path="/bar"} 90
{path="/bar"} 162
{path="/dings"} 105
{path="/bumms"} 101
@ -115,15 +124,17 @@ clear
# Tests for rate().
load 5m
testcounter_reset_middle 0+10x4 0+10x5
testcounter_reset_middle 0+27x4 0+27x5
testcounter_reset_end 0+10x9 0 10
# Counter resets in the middle of the range are handled correctly by rate().
eval instant at 50m rate(testcounter_reset_middle[50m])
{} 0.03
{} 0.08
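# Check: one reset (108 -> 0), so the delta is (108-27)+135 = 216 over 2700s;
# extrapolated to the full 3000s range: 216*3000/2700 = 240, and 240/3000s = 0.08.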
# Counter resets at end of range are ignored by rate().
eval instant at 50m rate(testcounter_reset_end[5m])
eval instant at 50m rate(testcounter_reset_end[6m])
{} 0
clear
@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
# intercept at t=3000: 38.63636363636364
# intercept at t=3000+3600: 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
{} 70
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
{} 76.81818181818181
{} 70
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h)
{} 76.81818181818181
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 51.36363636363637
# intercept at t = 4200+3600 = 7800
eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
{} 89.54545454545455
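# Check: slope = (76.81818... - 38.63636...)/3600 = 0.0106060..., so the line at
# t=4200 is 38.63636... + 1200*0.0106060... = 51.36363... and at t=7800 it is
# 38.63636... + 4800*0.0106060... = 89.54545...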
# With http_requests, there is a sample value exactly at the end of
@ -467,7 +478,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -502,7 +513,7 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
http_requests{job="app-server", instance="0", group="production"} 0+50x10
http_requests{job="app-server", instance="1", group="production"} 0+60x10
http_requests{job="app-server", instance="0", group="canary"} 0+70x10
@ -688,10 +699,10 @@ load 10s
metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307
metric10 -9.988465674311579e+307 9.988465674311579e+307
eval instant at 1m avg_over_time(metric[1m])
eval instant at 55s avg_over_time(metric[1m])
{} 3
eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m])
eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m])
{} 3
eval instant at 1m avg_over_time(metric2[1m])
@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m])
{} 9.988465674311579e+307
# This overflows float64.
eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m])
{} Inf
eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m])
{} +Inf
eval instant at 1m avg_over_time(metric9[1m])
{} -9.988465674311579e+307
@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m])
eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m])
{} -Inf
eval instant at 1m avg_over_time(metric10[1m])
eval instant at 45s avg_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
eval instant at 1m avg_over_time(metric10[2m])
{} 0
eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m])
{} 0
eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m])
{} 0
# Test if very big intermediate values cause loss of detail.
@ -779,10 +796,10 @@ clear
load 10s
metric 1 1e100 1 -1e100
eval instant at 1m sum_over_time(metric[1m])
eval instant at 1m sum_over_time(metric[2m])
{} 2
eval instant at 1m avg_over_time(metric[1m])
eval instant at 1m avg_over_time(metric[2m])
{} 0.5
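# Note: naive left-to-right float64 addition of 1 + 1e100 + 1 - 1e100 yields 0,
# as each 1 is absorbed by 1e100; the expected 2 (and avg 0.5) relies on the
# compensated (Kahan-style) summation these functions use internally.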
# Tests for stddev_over_time and stdvar_over_time.
@ -790,13 +807,13 @@ clear
load 10s
metric 0 8 8 2 3
eval instant at 1m stdvar_over_time(metric[1m])
eval instant at 1m stdvar_over_time(metric[2m])
{} 10.56
eval instant at 1m stddev_over_time(metric[1m])
eval instant at 1m stddev_over_time(metric[2m])
{} 3.249615
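# Check: mean of {0, 8, 8, 2, 3} is 4.2; stdvar = (17.64 + 2*14.44 + 4.84 + 1.44)/5
# = 52.8/5 = 10.56, and stddev = sqrt(10.56) = 3.249615...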
eval instant at 1m stddev_over_time((metric[1m]))
eval instant at 1m stddev_over_time((metric[2m]))
{} 3.249615
# Tests for stddev_over_time and stdvar_over_time #4927.
@ -826,42 +843,42 @@ load 10s
data{test="three samples"} 0 1 2
data{test="uneven samples"} 0 1 4
eval instant at 1m quantile_over_time(0, data[1m])
eval instant at 1m quantile_over_time(0, data[2m])
{test="two samples"} 0
{test="three samples"} 0
{test="uneven samples"} 0
eval instant at 1m quantile_over_time(0.5, data[1m])
eval instant at 1m quantile_over_time(0.5, data[2m])
{test="two samples"} 0.5
{test="three samples"} 1
{test="uneven samples"} 1
eval instant at 1m quantile_over_time(0.75, data[1m])
eval instant at 1m quantile_over_time(0.75, data[2m])
{test="two samples"} 0.75
{test="three samples"} 1.5
{test="uneven samples"} 2.5
eval instant at 1m quantile_over_time(0.8, data[1m])
eval instant at 1m quantile_over_time(0.8, data[2m])
{test="two samples"} 0.8
{test="three samples"} 1.6
{test="uneven samples"} 2.8
eval instant at 1m quantile_over_time(1, data[1m])
eval instant at 1m quantile_over_time(1, data[2m])
{test="two samples"} 1
{test="three samples"} 2
{test="uneven samples"} 4
eval_warn instant at 1m quantile_over_time(-1, data[1m])
eval_warn instant at 1m quantile_over_time(-1, data[2m])
{test="two samples"} -Inf
{test="three samples"} -Inf
{test="uneven samples"} -Inf
eval_warn instant at 1m quantile_over_time(2, data[1m])
eval_warn instant at 1m quantile_over_time(2, data[2m])
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf
eval_warn instant at 1m (quantile_over_time(2, (data[1m])))
eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
{test="two samples"} +Inf
{test="three samples"} +Inf
{test="uneven samples"} +Inf
@ -969,21 +986,21 @@ load 10s
data{type="some_nan3"} NaN 0 1
data{type="only_nan"} NaN NaN NaN
eval instant at 1m min_over_time(data[1m])
eval instant at 1m min_over_time(data[2m])
{type="numbers"} 0
{type="some_nan"} 0
{type="some_nan2"} 1
{type="some_nan3"} 0
{type="only_nan"} NaN
eval instant at 1m max_over_time(data[1m])
eval instant at 1m max_over_time(data[2m])
{type="numbers"} 3
{type="some_nan"} 2
{type="some_nan2"} 2
{type="some_nan3"} 1
{type="only_nan"} NaN
eval instant at 1m last_over_time(data[1m])
eval instant at 1m last_over_time(data[2m])
data{type="numbers"} 3
data{type="some_nan"} NaN
data{type="some_nan2"} 1
@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
{} 1
eval instant at 15m absent_over_time(http_requests[5m])
eval instant at 16m absent_over_time(http_requests[5m])
{} 1
eval instant at 15m absent_over_time(http_requests[10m])
eval instant at 16m absent_over_time(http_requests[6m])
{} 1
eval instant at 16m absent_over_time(http_requests[16m])
eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
{} 1
eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m])
eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s])
eval instant at 1m present_over_time(httpd_log_lines_total[30s])
eval instant at 15m present_over_time(http_requests[5m])
eval instant at 15m present_over_time(http_requests[10m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(http_requests[5m])
eval instant at 16m present_over_time(http_requests[6m])
eval instant at 16m present_over_time(http_requests[16m])
{instance="127.0.0.1", job="httpd", path="/bar"} 1
{instance="127.0.0.1", job="httpd", path="/foo"} 1
eval instant at 16m present_over_time(httpd_handshake_failures_total[1m])
{instance="127.0.0.1", job="node"} 1
eval instant at 16m present_over_time({instance="127.0.0.1"}[5m])
{instance="127.0.0.1",job="node"} 1
@ -1169,59 +1193,59 @@ load 5m
exp_root_log{l="x"} 10
exp_root_log{l="y"} 20
eval instant at 5m exp(exp_root_log)
eval instant at 1m exp(exp_root_log)
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903
eval instant at 5m exp(exp_root_log - 10)
eval instant at 1m exp(exp_root_log - 10)
{l="y"} 22026.465794806718
{l="x"} 1
eval instant at 5m exp(exp_root_log - 20)
eval instant at 1m exp(exp_root_log - 20)
{l="x"} 4.5399929762484854e-05
{l="y"} 1
eval instant at 5m ln(exp_root_log)
eval instant at 1m ln(exp_root_log)
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991
eval instant at 5m ln(exp_root_log - 10)
eval instant at 1m ln(exp_root_log - 10)
{l="y"} 2.302585092994046
{l="x"} -Inf
eval instant at 5m ln(exp_root_log - 20)
eval instant at 1m ln(exp_root_log - 20)
{l="y"} -Inf
{l="x"} NaN
eval instant at 5m exp(ln(exp_root_log))
eval instant at 1m exp(ln(exp_root_log))
{l="y"} 20
{l="x"} 10
eval instant at 5m sqrt(exp_root_log)
eval instant at 1m sqrt(exp_root_log)
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958
eval instant at 5m log2(exp_root_log)
eval instant at 1m log2(exp_root_log)
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363
eval instant at 5m log2(exp_root_log - 10)
eval instant at 1m log2(exp_root_log - 10)
{l="y"} 3.3219280948873626
{l="x"} -Inf
eval instant at 5m log2(exp_root_log - 20)
eval instant at 1m log2(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf
eval instant at 5m log10(exp_root_log)
eval instant at 1m log10(exp_root_log)
{l="x"} 1
{l="y"} 1.301029995663981
eval instant at 5m log10(exp_root_log - 10)
eval instant at 1m log10(exp_root_log - 10)
{l="y"} 1
{l="x"} -Inf
eval instant at 5m log10(exp_root_log - 20)
eval instant at 1m log10(exp_root_log - 20)
{l="x"} NaN
{l="y"} -Inf

View file

@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3)
eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
{start="positive"} 0.6363636363636364
{start="negative"} 0
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
{start="positive"} 0.6363636363636364
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
{start="positive"} 0.6363636363636364
# Test histogram_quantile, native and classic.
@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="negative"} 0.3
# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
{start="positive"} 0.72
{start="negative"} 0.3
@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
# Aggregated histogram: Everything in one. Note how native histograms
# don't require aggregation by le.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
{} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
{} 0.1277777777777778
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
{} 0.075
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
{} 0.12777777777777778
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.14
{job="job2"} 0.1125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
@ -448,19 +444,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
{} 979.75
# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.5, rate(mixed[5m]))
eval instant at 50m histogram_quantile(0.5, rate(mixed[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m]))
{instance="ins1", job="job1"} 0.2
{instance="ins2", job="job1"} NaN
@ -469,7 +465,7 @@ load_with_nhcb 5m
empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10
empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test the failure scenario of multiple histograms with the same label set.

View file

@ -4,81 +4,81 @@ load 5m
another_metric{env="1"} 60 120 180
# Does not drop __name__ for vector selector
eval instant at 15m metric{env="1"}
eval instant at 10m metric{env="1"}
metric{env="1"} 120
# Drops __name__ for unary operators
eval instant at 15m -metric
eval instant at 10m -metric
{env="1"} -120
# Drops __name__ for binary operators
eval instant at 15m metric + another_metric
eval instant at 10m metric + another_metric
{env="1"} 300
# Does not drop __name__ for binary comparison operators
eval instant at 15m metric <= another_metric
eval instant at 10m metric <= another_metric
metric{env="1"} 120
# Drops __name__ for binary comparison operators with "bool" modifier
eval instant at 15m metric <= bool another_metric
eval instant at 10m metric <= bool another_metric
{env="1"} 1
# Drops __name__ for vector-scalar operations
eval instant at 15m metric * 2
eval instant at 10m metric * 2
{env="1"} 240
# Drops __name__ for instant-vector functions
eval instant at 15m clamp(metric, 0, 100)
eval instant at 10m clamp(metric, 0, 100)
{env="1"} 100
# Drops __name__ for range-vector functions
eval instant at 15m rate(metric{env="1"}[10m])
eval instant at 10m rate(metric{env="1"}[10m])
{env="1"} 0.2
# Does not drop __name__ for last_over_time function
eval instant at 15m last_over_time(metric{env="1"}[10m])
eval instant at 10m last_over_time(metric{env="1"}[10m])
metric{env="1"} 120
# Drops __name__ for other _over_time functions
eval instant at 15m max_over_time(metric{env="1"}[10m])
eval instant at 10m max_over_time(metric{env="1"}[10m])
{env="1"} 120
# Allows relabeling (to-be-dropped) __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
{my_name="rate_metric", env="1"} 0.2
{my_name="rate_another_metric", env="1"} 0.2
# Allows preserving __name__ via label_replace
eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
rate_metric{env="1"} 0.2
rate_another_metric{env="1"} 0.2
# Allows relabeling (to-be-dropped) __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
{my_name="metric", env="1"} 0.2
{my_name="another_metric", env="1"} 0.2
# Allows preserving __name__ via label_join
eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
metric_1{env="1"} 0.2
another_metric_1{env="1"} 0.2
# Does not drop metric names for aggregation operators
eval instant at 15m sum by (__name__, env) (metric{env="1"})
eval instant at 10m sum by (__name__, env) (metric{env="1"})
metric{env="1"} 120
# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by the not-yet-removed __name__ label)
# This is an accidental side effect of delayed __name__ label dropping
eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m]))
eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m]))
# Aggregation operators aggregate metrics with same labelset and to-be-dropped names
# This is an accidental side effect of delayed __name__ label dropping
eval instant at 15m sum(rate({env="1"}[10m])) by (env)
eval instant at 10m sum(rate({env="1"}[10m])) by (env)
{env="1"} 0.4
# Aggregation operators propagate __name__ label dropping information
eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"}))
eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"}))
metric{env="1"} 120
eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
{env="1"} 0.2

View file

@ -2,55 +2,55 @@
load 5m
empty_histogram {{}}
eval instant at 5m empty_histogram
eval instant at 1m empty_histogram
{__name__="empty_histogram"} {{}}
eval instant at 5m histogram_count(empty_histogram)
eval instant at 1m histogram_count(empty_histogram)
{} 0
eval instant at 5m histogram_sum(empty_histogram)
eval instant at 1m histogram_sum(empty_histogram)
{} 0
eval instant at 5m histogram_avg(empty_histogram)
eval instant at 1m histogram_avg(empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(0, 8, empty_histogram)
eval instant at 1m histogram_fraction(0, 8, empty_histogram)
{} NaN
clear
# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}
# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
eval instant at 1m histogram_count(single_histogram)
{} 4
# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
eval instant at 1m histogram_sum(single_histogram)
{} 5
# histogram_avg calculates the average from sum and count properties.
eval instant at 5m histogram_avg(single_histogram)
eval instant at 1m histogram_avg(single_histogram)
{} 1.25
# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
eval instant at 1m histogram_fraction(1, 2, single_histogram)
{} 0.5
# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
eval instant at 1m histogram_fraction(0, 8, single_histogram)
{} 1
# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
eval instant at 1m histogram_quantile(0.5, single_histogram)
{} 1.5
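# Check: phi=0.5 of count 4 targets rank 2; cumulative bucket counts are 1 and 3,
# so rank 2 lies halfway through the 1 < x <= 2 bucket: 1 + 0.5*(2-1) = 1.5.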
clear
# Repeat the same histogram 10 times.
load 5m
@ -88,7 +88,7 @@ eval instant at 50m histogram_fraction(1, 2, multi_histogram)
eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5
clear
# Accumulate the histogram addition for 10 iterations; offset is a bucket position where offset:0 is always the bucket
# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
@ -133,14 +133,14 @@ eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5
# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
eval instant at 50m rate(incr_histogram[10m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
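# Check: 1/(5*60) = 0.0033333333333333335 and 2/(5*60) = 0.006666666666666667.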
# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5
clear
# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
@ -166,77 +166,77 @@ eval instant at 5m histogram_avg(low_res_histogram)
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
{} 1
clear
# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}
eval instant at 5m histogram_count(single_zero_histogram)
eval instant at 1m histogram_count(single_zero_histogram)
{} 1
eval instant at 5m histogram_sum(single_zero_histogram)
eval instant at 1m histogram_sum(single_zero_histogram)
{} 0.25
eval instant at 5m histogram_avg(single_zero_histogram)
eval instant at 1m histogram_avg(single_zero_histogram)
{} 0.25
# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally
# distributed around zero, i.e. there are equally many positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram)
{} 1
# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
eval instant at 1m histogram_quantile(0.5, single_zero_histogram)
{} 0
clear
# Let's turn single_histogram upside-down.
load 5m
negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}
eval instant at 5m histogram_count(negative_histogram)
eval instant at 1m histogram_count(negative_histogram)
{} 4
eval instant at 5m histogram_sum(negative_histogram)
eval instant at 1m histogram_sum(negative_histogram)
{} -5
eval instant at 5m histogram_avg(negative_histogram)
eval instant at 1m histogram_avg(negative_histogram)
{} -1.25
# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
{} 0.5
eval instant at 5m histogram_quantile(0.5, negative_histogram)
eval instant at 1m histogram_quantile(0.5, negative_histogram)
{} -1.5
clear
# Two histogram samples.
load 5m
two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}
# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
eval instant at 5m histogram_count(two_samples_histogram)
{} 4
eval instant at 10m histogram_sum(two_samples_histogram)
eval instant at 5m histogram_sum(two_samples_histogram)
{} -4
eval instant at 10m histogram_avg(two_samples_histogram)
eval instant at 5m histogram_avg(two_samples_histogram)
{} -1
eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5
eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
{} -1.5
clear
# Add two histograms with negated data.
load 5m
@ -259,6 +259,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
{} 0.5
clear
# Add histogram to test sum(last_over_time) regression
load 5m
incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10
@ -270,6 +272,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram))
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
{} 30
clear
# Apply rate function to histogram.
load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
@ -280,6 +284,8 @@ eval instant at 5m rate(histogram_rate[45s])
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
clear
# Apply count and sum function to histogram.
load 10m
histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@ -290,6 +296,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2)
eval instant at 10m histogram_sum(histogram_count_sum_2)
{} 100
clear
# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
@ -300,6 +308,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
{} 1.163807968526718
clear
# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
@ -310,6 +320,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
{} 2.3971123370139447e-05
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@ -320,6 +332,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398
clear
# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
@ -330,6 +344,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
{} 759352122.1939945
clear
# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
@ -340,6 +356,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
{} 1.725830020304794
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@ -350,6 +368,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
{} NaN
clear
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
@ -360,6 +380,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} Inf
clear
# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
@ -391,6 +413,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf
clear
# Apply quantile function to histogram with all negative buckets with zero bucket.
load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
@ -419,6 +443,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf
clear
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@ -462,6 +488,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3)
eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf
clear
# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1
@ -469,6 +497,8 @@ load 10m
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
{} NaN
clear
# Apply fraction function to histogram with positive and zero buckets.
load 10m
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
@ -633,6 +663,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
{} 1
clear
# Apply fraction function to histogram with both positive, negative and zero buckets.
load 10m
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@ -814,7 +846,7 @@ load 30s
some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}
# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
{} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
# Test the case where we have more than two points for rate
@ -836,11 +868,11 @@ eval_warn instant at 1m30s rate(some_metric[1m])
# Should produce no results.
# Start with custom, end with exponential.
eval_warn instant at 1m rate(some_metric[30s])
eval_warn instant at 1m rate(some_metric[1m])
# Should produce no results.
# Start with exponential, end with custom.
eval_warn instant at 30s rate(some_metric[30s])
eval_warn instant at 30s rate(some_metric[1m])
# Should produce no results.
clear
@ -975,8 +1007,8 @@ clear
load 1m
histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}}
eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}
eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m])
eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m])
{} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}

View file

@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"}
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60
{group="canary", instance="0", job="api-server"} 330
{group="canary", instance="1", job="api-server"} 440
@ -308,65 +308,65 @@ load 5m
threshold{instance="abc",job="node",target="a@b.com"} 0
# Copy machine role to node variable.
eval instant at 5m node_role * on (instance) group_right (role) node_var
eval instant at 1m node_role * on (instance) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_var * on (instance) group_left (role) node_role
eval instant at 1m node_var * on (instance) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_var * ignoring (role) group_left (role) node_role
eval instant at 1m node_var * ignoring (role) group_left (role) node_role
{instance="abc",job="node",role="prometheus"} 2
eval instant at 5m node_role * ignoring (role) group_right (role) node_var
eval instant at 1m node_role * ignoring (role) group_right (role) node_var
{instance="abc",job="node",role="prometheus"} 2
# Copy machine role to node variable with instrumentation labels.
eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1
eval instant at 5m node_cpu * on (instance) group_left (role) node_role
eval instant at 1m node_cpu * on (instance) group_left (role) node_role
{instance="abc",job="node",mode="idle",role="prometheus"} 3
{instance="abc",job="node",mode="user",role="prometheus"} 1
# Ratio of total.
eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427
eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
{} 1.0
eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
{instance="abc",job="node",mode="idle"} .75
{instance="abc",job="node",mode="user"} .25
{instance="def",job="node",mode="idle"} .80
{instance="def",job="node",mode="user"} .20
eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
{job="node",mode="idle"} 0.7857142857142857
{job="node",mode="user"} 0.21428571428571427
eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
{} 1.0
# Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here).
eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0
{instance="abc",job="node",mode="idle",foo="bar"} 3
{instance="abc",job="node",mode="user",foo="bar"} 1
{instance="def",job="node",mode="idle",foo="bar"} 8
@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
# Use threshold from metric, and copy over target.
eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold
eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
# Use threshold from metric, and a default (1) if it's not present.
eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
node_cpu{instance="def",job="node",mode="idle"} 8
@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or
# Check that binops drop the metric name.
eval instant at 5m node_cpu + 2
eval instant at 1m node_cpu + 2
{instance="abc",job="node",mode="idle"} 5
{instance="abc",job="node",mode="user"} 3
{instance="def",job="node",mode="idle"} 10
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu - 2
eval instant at 1m node_cpu - 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} -1
{instance="def",job="node",mode="idle"} 6
{instance="def",job="node",mode="user"} 0
eval instant at 5m node_cpu / 2
eval instant at 1m node_cpu / 2
{instance="abc",job="node",mode="idle"} 1.5
{instance="abc",job="node",mode="user"} 0.5
{instance="def",job="node",mode="idle"} 4
{instance="def",job="node",mode="user"} 1
eval instant at 5m node_cpu * 2
eval instant at 1m node_cpu * 2
{instance="abc",job="node",mode="idle"} 6
{instance="abc",job="node",mode="user"} 2
{instance="def",job="node",mode="idle"} 16
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu ^ 2
eval instant at 1m node_cpu ^ 2
{instance="abc",job="node",mode="idle"} 9
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 64
{instance="def",job="node",mode="user"} 4
eval instant at 5m node_cpu % 2
eval instant at 1m node_cpu % 2
{instance="abc",job="node",mode="idle"} 1
{instance="abc",job="node",mode="user"} 1
{instance="def",job="node",mode="idle"} 0
@ -432,14 +432,14 @@ load 5m
metricB{baz="meh"} 4
# On with no labels, for metrics with no common labels.
eval instant at 5m random + on() metricA
eval instant at 1m random + on() metricA
{} 5
# Ignoring with no labels is the same as no ignoring.
eval instant at 5m metricA + ignoring() metricB
eval instant at 1m metricA + ignoring() metricB
{baz="meh"} 7
eval instant at 5m metricA + metricB
eval instant at 1m metricA + metricB
{baz="meh"} 7
clear
@ -457,16 +457,16 @@ load 5m
test_total{instance="localhost"} 50
test_smaller{instance="localhost"} 10
eval instant at 5m test_total > bool test_smaller
eval instant at 1m test_total > bool test_smaller
{instance="localhost"} 1
eval instant at 5m test_total > test_smaller
eval instant at 1m test_total > test_smaller
test_total{instance="localhost"} 50
eval instant at 5m test_total < bool test_smaller
eval instant at 1m test_total < bool test_smaller
{instance="localhost"} 0
eval instant at 5m test_total < test_smaller
eval instant at 1m test_total < test_smaller
clear
@ -476,14 +476,14 @@ load 5m
trigx{} 20
trigNaN{} NaN
eval instant at 5m trigy atan2 trigx
eval instant at 1m trigy atan2 trigx
{} 0.4636476090008061
eval instant at 5m trigy atan2 trigNaN
eval instant at 1m trigy atan2 trigNaN
{} NaN
eval instant at 5m 10 atan2 20
eval instant at 1m 10 atan2 20
0.4636476090008061
eval instant at 5m 10 atan2 NaN
eval instant at 1m 10 atan2 NaN
NaN

View file

@ -1,18 +1,18 @@
# sum_over_time with all values
load 30s
load 15s
bar 0 1 10 100 1000
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
eval range from 0 to 1m step 30s sum_over_time(bar[30s])
{} 0 11 1100
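# Check: at 30s the window (0s, 30s] holds 1@15s and 10@30s = 11; at 60s it
# holds 100@45s and 1000@60s = 1100.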
clear
# sum_over_time with trailing values
load 30s
load 15s
bar 0 1 10 100 1000 0 0 0 0
eval range from 0 to 2m step 1m sum_over_time(bar[30s])
{} 0 11 1100
{} 0 1100 0
clear
@ -21,15 +21,15 @@ load 30s
bar 0 1 10 100 1000 10000 100000 1000000 10000000
eval range from 0 to 4m step 1m sum_over_time(bar[30s])
{} 0 11 1100 110000 11000000
{} 0 10 1000 100000 10000000
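# Check: with left-open ranges, each 30s window at these steps contains only the
# sample at the step time itself.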
clear
# sum_over_time with all values random
load 30s
load 15s
bar 5 17 42 2 7 905 51
eval range from 0 to 3m step 1m sum_over_time(bar[30s])
eval range from 0 to 90s step 30s sum_over_time(bar[30s])
{} 5 59 9 956
clear

View file

@ -14,10 +14,10 @@ eval instant at 40s metric
{__name__="metric"} 2
# It goes stale 5 minutes after the last sample.
eval instant at 330s metric
eval instant at 329s metric
{__name__="metric"} 2
eval instant at 331s metric
eval instant at 330s metric
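# With the left-open lookback window (t-5m, t], the sample at 30s is still
# inside (29s, 329s] but falls outside (30s, 330s].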
# Range vector ignores stale sample.
@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[10s])
eval instant at 20s count_over_time(metric[20s])
{} 1
eval instant at 20s count_over_time(metric[10])
eval instant at 20s count_over_time(metric[20])
{} 1
@ -48,7 +52,7 @@ eval instant at 0s metric
eval instant at 150s metric
{__name__="metric"} 0
eval instant at 300s metric
eval instant at 299s metric
{__name__="metric"} 0
eval instant at 301s metric
eval instant at 300s metric

View file

@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s])
# Every evaluation yields the last value, i.e. 2
eval instant at 5m sum_over_time(metric[50s:10s])
{} 12
{} 10
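# Check: (250s, 300s] with a 10s step gives 5 evaluation points, each yielding 2: 5*2 = 10.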
# Series becomes stale at 5m10s (5m after last sample)
# Hence subquery gets a single sample at 6m-50s=5m10s.
eval instant at 6m sum_over_time(metric[50s:10s])
# Hence subquery gets a single sample at 5m10s.
eval instant at 5m59s sum_over_time(metric[60s:10s])
{} 2
eval instant at 10s rate(metric[20s:10s])
{} 0.1
eval instant at 20s rate(metric[20s:5s])
{} 0.05
{} 0.06666666666666667
clear
@ -49,16 +49,16 @@ load 10s
metric3 0+3x1000
eval instant at 1000s sum_over_time(metric1[30s:10s])
{} 394
{} 297
# This is (394*2 - 100), because other than the last 100 at 1000s,
# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s,
# everything else is repeated with the 5s step.
eval instant at 1000s sum_over_time(metric1[30s:5s])
{} 688
{} 591
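# Check: 97 + 2*98 + 2*99 + 100 = 591.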
# Offset is aligned with the step.
# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s].
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
{} 394
{} 297
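# Check: 98 + 99 + 100 = 297.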
# Same result for different offsets due to step alignment.
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)
@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4
{} 0.30000000000000004
eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
{} 0.8
{} 0.6000000000000001
eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
{} 1.2
{} 0.9
eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
{} 2.4
{} 1.8
clear
@ -115,16 +115,20 @@ load 7s
eval instant at 80s rate(metric[1m])
{} 2.517857143
# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
eval instant at 80s rate(metric[1m:10s])
{} 2.366666667
# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20)
eval instant at 80s rate(metric[1m500ms:10s])
{} 2.3666666666666667
# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61
eval instant at 80s rate(metric[1m1s:10s])
{} 2.360655737704918
# Only one value between 10s and 20s, 2@14
eval instant at 20s min_over_time(metric[10s])
{} 2
# min(1@10, 2@20)
eval instant at 20s min_over_time(metric[10s:10s])
# min(2@20)
eval instant at 20s min_over_time(metric[15s:10s])
{} 1
eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])

View file

@ -5,92 +5,92 @@ load 5m
trig{l="y"} 20
trig{l="NaN"} NaN
eval instant at 5m sin(trig)
eval instant at 1m sin(trig)
{l="x"} -0.5440211108893699
{l="y"} 0.9129452507276277
{l="NaN"} NaN
eval instant at 5m cos(trig)
eval instant at 1m cos(trig)
{l="x"} -0.8390715290764524
{l="y"} 0.40808206181339196
{l="NaN"} NaN
eval instant at 5m tan(trig)
eval instant at 1m tan(trig)
{l="x"} 0.6483608274590867
{l="y"} 2.2371609442247427
{l="NaN"} NaN
eval instant at 5m asin(trig - 10.1)
eval instant at 1m asin(trig - 10.1)
{l="x"} -0.10016742116155944
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m acos(trig - 10.1)
eval instant at 1m acos(trig - 10.1)
{l="x"} 1.670963747956456
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m atan(trig)
eval instant at 1m atan(trig)
{l="x"} 1.4711276743037345
{l="y"} 1.5208379310729538
{l="NaN"} NaN
eval instant at 5m sinh(trig)
eval instant at 1m sinh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN
eval instant at 5m cosh(trig)
eval instant at 1m cosh(trig)
{l="x"} 11013.232920103324
{l="y"} 2.4258259770489514e+08
{l="NaN"} NaN
eval instant at 5m tanh(trig)
eval instant at 1m tanh(trig)
{l="x"} 0.9999999958776927
{l="y"} 1
{l="NaN"} NaN
eval instant at 5m asinh(trig)
eval instant at 1m asinh(trig)
{l="x"} 2.99822295029797
{l="y"} 3.6895038689889055
{l="NaN"} NaN
eval instant at 5m acosh(trig)
eval instant at 1m acosh(trig)
{l="x"} 2.993222846126381
{l="y"} 3.6882538673612966
{l="NaN"} NaN
eval instant at 5m atanh(trig - 10.1)
eval instant at 1m atanh(trig - 10.1)
{l="x"} -0.10033534773107522
{l="y"} NaN
{l="NaN"} NaN
eval instant at 5m rad(trig)
eval instant at 1m rad(trig)
{l="x"} 0.17453292519943295
{l="y"} 0.3490658503988659
{l="NaN"} NaN
eval instant at 5m rad(trig - 10)
eval instant at 1m rad(trig - 10)
{l="x"} 0
{l="y"} 0.17453292519943295
{l="NaN"} NaN
eval instant at 5m rad(trig - 20)
eval instant at 1m rad(trig - 20)
{l="x"} -0.17453292519943295
{l="y"} 0
{l="NaN"} NaN
eval instant at 5m deg(trig)
eval instant at 1m deg(trig)
{l="x"} 572.9577951308232
{l="y"} 1145.9155902616465
{l="NaN"} NaN
eval instant at 5m deg(trig - 10)
eval instant at 1m deg(trig - 10)
{l="x"} 0
{l="y"} 572.9577951308232
{l="NaN"} NaN
eval instant at 5m deg(trig - 20)
eval instant at 1m deg(trig - 20)
{l="x"} -572.9577951308232
{l="y"} 0
{l="NaN"} NaN


@ -45,6 +45,11 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func init() {
// This can be removed when the default validation scheme in common is updated.
model.NameValidationScheme = model.UTF8Validation
}
func TestPopulateLabels(t *testing.T) {
cases := []struct {
in labels.Labels


@ -329,9 +329,9 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
mrc = sp.config.MetricRelabelConfigs
)
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
validationScheme := model.UTF8Validation
if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
validationScheme = model.LegacyValidation
}
sp.targetMtx.Lock()
@ -485,9 +485,9 @@ func (sp *scrapePool) sync(targets []*Target) {
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
)
validationScheme := model.LegacyValidation
if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig {
validationScheme = model.UTF8Validation
validationScheme := model.UTF8Validation
if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
validationScheme = model.LegacyValidation
}
sp.targetMtx.Lock()
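The two hunks above flip the default: metric names are now validated as UTF-8 unless a scrape config explicitly selects the legacy scheme. A minimal sketch of opting back out, assuming a config file named prometheus.yml (the key is the one the test change below adds under global:):

cat > prometheus.yml <<'EOF'
global:
  metric_name_validation_scheme: legacy
EOF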


@ -1044,6 +1044,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
}
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
model.NameValidationScheme = model.LegacyValidation
s := teststorage.New(t)
defer s.Close()
ctx, cancel := context.WithCancel(context.Background())
@ -3812,6 +3813,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
// Create a scrape loop with the HTTP server as the target.
configStr := fmt.Sprintf(`
global:
metric_name_validation_scheme: legacy
scrape_interval: 1s
scrape_timeout: 1s
scrape_configs:
@ -3883,6 +3885,164 @@ scrape_configs:
}
}
func TestTargetScrapeConfigWithLabels(t *testing.T) {
const (
configTimeout = 1500 * time.Millisecond
expectedTimeout = "1.5"
expectedTimeoutLabel = "1s500ms"
secondTimeout = 500 * time.Millisecond
secondTimeoutLabel = "500ms"
expectedParam = "value1"
secondParam = "value2"
expectedPath = "/metric-ok"
secondPath = "/metric-nok"
httpScheme = "http"
paramLabel = "__param_param"
jobName = "test"
)
createTestServer := func(t *testing.T, done chan struct{}) *url.URL {
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer close(done)
require.Equal(t, expectedTimeout, r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"))
require.Equal(t, expectedParam, r.URL.Query().Get("param"))
require.Equal(t, expectedPath, r.URL.Path)
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
w.Write([]byte("metric_a 1\nmetric_b 2\n"))
}),
)
t.Cleanup(server.Close)
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
return serverURL
}
run := func(t *testing.T, cfg *config.ScrapeConfig, targets []*targetgroup.Group) chan struct{} {
done := make(chan struct{})
srvURL := createTestServer(t, done)
// Update target addresses to use the dynamically created server URL.
for _, target := range targets {
for i := range target.Targets {
target.Targets[i][model.AddressLabel] = model.LabelValue(srvURL.Host)
}
}
sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(sp.stop)
sp.Sync(targets)
return done
}
cases := []struct {
name string
cfg *config.ScrapeConfig
targets []*targetgroup.Group
}{
{
name: "Everything in scrape config",
cfg: &config.ScrapeConfig{
ScrapeInterval: model.Duration(2 * time.Second),
ScrapeTimeout: model.Duration(configTimeout),
Params: url.Values{"param": []string{expectedParam}},
JobName: jobName,
Scheme: httpScheme,
MetricsPath: expectedPath,
},
targets: []*targetgroup.Group{
{
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue("")},
},
},
},
},
{
name: "Overridden in target",
cfg: &config.ScrapeConfig{
ScrapeInterval: model.Duration(2 * time.Second),
ScrapeTimeout: model.Duration(secondTimeout),
JobName: jobName,
Scheme: httpScheme,
MetricsPath: secondPath,
Params: url.Values{"param": []string{secondParam}},
},
targets: []*targetgroup.Group{
{
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue(""),
model.ScrapeTimeoutLabel: expectedTimeoutLabel,
model.MetricsPathLabel: expectedPath,
paramLabel: expectedParam,
},
},
},
},
},
{
name: "Overridden in relabel_config",
cfg: &config.ScrapeConfig{
ScrapeInterval: model.Duration(2 * time.Second),
ScrapeTimeout: model.Duration(secondTimeout),
JobName: jobName,
Scheme: httpScheme,
MetricsPath: secondPath,
Params: url.Values{"param": []string{secondParam}},
RelabelConfigs: []*relabel.Config{
{
Action: relabel.DefaultRelabelConfig.Action,
Regex: relabel.DefaultRelabelConfig.Regex,
SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
TargetLabel: model.ScrapeTimeoutLabel,
Replacement: expectedTimeoutLabel,
},
{
Action: relabel.DefaultRelabelConfig.Action,
Regex: relabel.DefaultRelabelConfig.Regex,
SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
TargetLabel: paramLabel,
Replacement: expectedParam,
},
{
Action: relabel.DefaultRelabelConfig.Action,
Regex: relabel.DefaultRelabelConfig.Regex,
SourceLabels: relabel.DefaultRelabelConfig.SourceLabels,
TargetLabel: model.MetricsPathLabel,
Replacement: expectedPath,
},
},
},
targets: []*targetgroup.Group{
{
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue(""),
model.ScrapeTimeoutLabel: secondTimeoutLabel,
model.MetricsPathLabel: secondPath,
paramLabel: secondParam,
},
},
},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
select {
case <-run(t, c.cfg, c.targets):
case <-time.After(10 * time.Second):
t.Fatal("timeout after 10 seconds")
}
})
}
}
func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice chan bool) {
var scrapes int
scrapedTwice = make(chan bool)


@ -441,8 +441,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
}
// Encode scrape query parameters as labels.
for k, v := range cfg.Params {
if len(v) > 0 {
lb.Set(model.ParamLabelPrefix+k, v[0])
if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" {
lb.Set(name, v[0])
}
}
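With the added guard, a __param_<name> label that is already present on the target, whether set directly in the target group or via relabeling (both exercised by the new test cases above), now takes precedence; previously the value from the scrape config's params unconditionally overwrote it.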

24
scripts/get_module_version.sh Executable file

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# If no version string is passed as an argument, read the VERSION file
if [ $# -eq 0 ]; then
VERSION="$(< VERSION)"
else
VERSION=$1
fi
# Remove leading 'v' if present
VERSION="${VERSION#v}"
# Extract MAJOR, MINOR, and REST
MAJOR="${VERSION%%.*}"
MINOR="${VERSION#*.}"; MINOR="${MINOR%%.*}"
REST="${VERSION#*.*.}"
# Format and output based on MAJOR version
if [[ "$MAJOR" == "2" ]]; then
echo "0.$MINOR.$REST"
elif [[ "$MAJOR" == "3" ]]; then
printf "0.3%02d.$REST\n" "$MINOR"
fi
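Two illustrative invocations (version strings are examples; the v3 branch yields the 0.300.0-beta.0 used in the package manifests below):

$ ./scripts/get_module_version.sh v2.55.1
0.55.1
$ ./scripts/get_module_version.sh v3.0.0-beta.0
0.300.0-beta.0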


@ -21,6 +21,7 @@ then
fi
buildOrder=(lezer-promql codemirror-promql)
assetsDir="./static"
function buildModule() {
for module in "${buildOrder[@]}"; do
@ -32,15 +33,17 @@ function buildModule() {
function buildReactApp() {
echo "build react-app"
(cd react-app && npm run build)
rm -rf ./static/react-app
mv ./react-app/build ./static/react-app
mkdir -p ${assetsDir}
rm -rf ${assetsDir}/react-app
mv ./react-app/build ${assetsDir}/react-app
}
function buildMantineUI() {
echo "build mantine-ui"
npm run build -w @prometheus-io/mantine-ui
rm -rf ./static/mantine-ui
mv ./mantine-ui/dist ./static/mantine-ui
mkdir -p ${assetsDir}
rm -rf ${assetsDir}/mantine-ui
mv ./mantine-ui/dist ${assetsDir}/mantine-ui
}
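# Note: the mkdir -p calls above make both build steps safe on a fresh
# checkout where ${assetsDir} does not yet exist; previously the mv would
# fail if ./static had been cleaned away.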
for i in "$@"; do


@ -1,7 +1,7 @@
{
"name": "@prometheus-io/mantine-ui",
"private": true,
"version": "0.54.1",
"version": "0.300.0-beta.0",
"type": "module",
"scripts": {
"start": "vite",
@ -27,7 +27,7 @@
"@mantine/notifications": "^7.11.2",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "^0.54.1",
"@prometheus-io/codemirror-promql": "0.300.0-beta.0",
"@reduxjs/toolkit": "^2.2.1",
"@tabler/icons-react": "^2.47.0",
"@tanstack/react-query": "^5.22.2",


@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.54.1",
"@prometheus-io/lezer-promql": "0.300.0-beta.0",
"lru-cache": "^7.18.3"
},
"devDependencies": {


@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",


@ -1,12 +1,12 @@
{
"name": "prometheus-io",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"workspaces": [
"mantine-ui",
"module/*"
@ -24,7 +24,7 @@
},
"mantine-ui": {
"name": "@prometheus-io/mantine-ui",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"dependencies": {
"@codemirror/autocomplete": "^6.18.0",
"@codemirror/language": "^6.10.2",
@ -41,7 +41,7 @@
"@mantine/notifications": "^7.11.2",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "^0.54.1",
"@prometheus-io/codemirror-promql": "0.300.0-beta.0",
"@reduxjs/toolkit": "^2.2.1",
"@tabler/icons-react": "^2.47.0",
"@tanstack/react-query": "^5.22.2",
@ -159,10 +159,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.54.1",
"@prometheus-io/lezer-promql": "0.300.0-beta.0",
"lru-cache": "^7.18.3"
},
"devDependencies": {
@ -192,7 +192,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.7.1",


@ -1,7 +1,7 @@
{
"name": "prometheus-io",
"description": "Monorepo for the Prometheus UI",
"version": "0.54.1",
"version": "0.300.0-beta.0",
"private": true,
"scripts": {
"build": "bash build_ui.sh --all",


@ -26,7 +26,6 @@ const sampleFlagsResponse = {
'storage.tsdb.min-block-duration': '2h',
'storage.tsdb.no-lockfile': 'false',
'storage.tsdb.path': 'data/',
'storage.tsdb.retention': '0s',
'storage.tsdb.retention.size': '0B',
'storage.tsdb.retention.time': '0s',
'storage.tsdb.wal-compression': 'false',


@ -692,32 +692,6 @@ exports[`Flags should match snapshot 1`] = `
/>
</td>
</tr>
<tr
key="--storage.tsdb.retention"
>
<td
className="flag-item"
>
<span
dangerouslySetInnerHTML={
Object {
"__html": "--storage.tsdb.retention",
}
}
/>
</td>
<td
className="flag-value"
>
<span
dangerouslySetInnerHTML={
Object {
"__html": "0s",
}
}
/>
</td>
</tr>
<tr
key="--storage.tsdb.retention.size"
>


@ -1,190 +0,0 @@
.navbar {
margin-bottom: 20px;
}
div.prom_lhs_menu {
float: left;
margin-right: 2ex;
background: #222;
min-height: 100%;
overflow: auto;
width: auto;
margin-left: -40px;
margin-top: 55px;
height: 100%;
}
.prom_lhs_menu_nav {
padding-right: 0;
}
.prom_lhs_menu ul {
list-style: none;
padding-left: .5ex;
margin-left: .5ex;
}
.prom_lhs_menu li {
padding-right: 1ex;
padding-left: 1ex;
font-size: 0.9rem;
}
.prom_lhs_menu a,
.prom_lhs_menu li {
color: #999999;
display: block;
}
.prom_lhs_menu a {
text-decoration: none;
padding-top: 0;
padding-bottom: 0;
}
.prom_lhs_menu_selected a {
pointer-events: none;
cursor: default;
}
li.prom_lhs_menu_selected {
background: #555555;
background-clip: padding-box;
}
.prom_lhs_menu li:hover {
background: #666666;
}
.prom_console_rhs {
float: right;
margin-left: 1ex;
height: 100%;
padding-top: 55px;
}
.prom_console_rhs table {
margin-top: 1ex;
margin-bottom: 32px; /* Space for time control. */
}
.prom_console_rhs th {
text-align: center;
}
.prom_console_rhs td:nth-child(2) {
text-align: right;
}
.prom_console_content {
overflow: visible;
margin-bottom: 32px; /* Space for time control. */
padding-top: 70px; /* Space for navbar */
}
a.prom_query_drilldown {
text-decoration: none;
color: black;
}
a.prom_query_drilldown:hover, a.prom_query_drilldown:active {
text-decoration: underline;
}
#samplesGraph {
width: 100%;
}
.prom_content_div {
width: 80%;
}
.prom_graph_table {
}
.rickshaw_legend {
padding: 2px;
margin-top: 1px;
}
.rickshaw_legend li {
min-width: 0;
}
.rickshaw_legend ul li {
list-style-type: none;
display: inline-block;
}
.rickshaw_graph {
width: 100%;
padding: 0;
}
.prom_graph_hover_flipped.x_label {
right: 0;
}
.prom_graph_hover_flipped.item {
right: 10px;
}
.rickshaw_graph .detail .prom_graph_hover_flipped.item:before {
content: "\25b8";
left: auto;
right: 1px;
font-size: 0.8em;
}
.prom_graph_ytitle {
-webkit-transform: rotate(-90deg);
-moz-transform: rotate(-90deg);
font-size: 11px;
font-family: Arial;
max-width: 13px;
white-space: nowrap;
}
.prom_graph_xtitle {
text-align: center;
font-size: 11px;
font-family: Arial;
}
.prom_graph_loading {
position: absolute;
top: 0px;
left: 0px;
}
.prom_graph_error {
font-family: Arial;
text-align: center;
}
a.prom_graph_link {
text-decoration: none;
color: black;
}
.prom_graph_timecontrol {
background: #222;
position: fixed;
padding: 2px;
bottom: 0;
left: 0;
z-index: 2;
width: 100%;
}
.prom_graph_timecontrol_inner {
text-align: center;
height: 31px;
}
.prom_graph_timecontrol_group {
display: inline-block;
margin: 1px 5px;
}
.prom_graph_timecontrol_group .btn {
font-size: 0.8em;
border-radius: 0;
height: 29px;
border-color: #ccc;
}
.prom_graph_timecontrol_group .input {
height: 29px;
padding: 6px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
text-align: center;
}
.prom_graph_timecontrol_group .dropdown-menu {
min-width: 90px;
}
.prom_graph_timecontrol_group .dropdown-menu li {
text-align: center;
}


@ -1,687 +0,0 @@
/*
* Functions to make it easier to write prometheus consoles, such
* as graphs.
*
*/
PromConsole = {};
PromConsole.NumberFormatter = {};
PromConsole.NumberFormatter.prefixesBig = ["k", "M", "G", "T", "P", "E", "Z", "Y"];
PromConsole.NumberFormatter.prefixesBig1024 = ["ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"];
PromConsole.NumberFormatter.prefixesSmall = ["m", "u", "n", "p", "f", "a", "z", "y"];
PromConsole._stripTrailingZero = function(x) {
if (x.indexOf("e") == -1) {
// It's not safe to strip if it's scientific notation.
return x.replace(/\.?0*$/, '');
}
return x;
};
// Humanize a number.
PromConsole.NumberFormatter.humanize = function(x) {
if (x === null) {
return "null";
}
var ret = PromConsole.NumberFormatter._humanize(
x, PromConsole.NumberFormatter.prefixesBig,
PromConsole.NumberFormatter.prefixesSmall, 1000);
x = ret[0];
var prefix = ret[1];
if (Math.abs(x) < 1) {
return x.toExponential(3) + prefix;
}
return PromConsole._stripTrailingZero(x.toFixed(3)) + prefix;
};
// Humanize a number, don't use milli/micro/etc. prefixes.
PromConsole.NumberFormatter.humanizeNoSmallPrefix = function(x) {
if (x === null) {
return "null";
}
if (Math.abs(x) < 1) {
return PromConsole._stripTrailingZero(x.toPrecision(3));
}
var ret = PromConsole.NumberFormatter._humanize(
x, PromConsole.NumberFormatter.prefixesBig,
[], 1000);
x = ret[0];
var prefix = ret[1];
return PromConsole._stripTrailingZero(x.toFixed(3)) + prefix;
};
// Humanize a number with 1024 as the base, rather than 1000.
PromConsole.NumberFormatter.humanize1024 = function(x) {
if (x === null) {
return "null";
}
var ret = PromConsole.NumberFormatter._humanize(
x, PromConsole.NumberFormatter.prefixesBig1024,
[], 1024);
x = ret[0];
var prefix = ret[1];
if (Math.abs(x) < 1) {
return x.toExponential(3) + prefix;
}
return PromConsole._stripTrailingZero(x.toFixed(3)) + prefix;
};
// Humanize a number, returning an exact representation.
PromConsole.NumberFormatter.humanizeExact = function(x) {
if (x === null) {
return "null";
}
var ret = PromConsole.NumberFormatter._humanize(
x, PromConsole.NumberFormatter.prefixesBig,
PromConsole.NumberFormatter.prefixesSmall, 1000);
return ret[0] + ret[1];
};
PromConsole.NumberFormatter._humanize = function(x, prefixesBig, prefixesSmall, factor) {
var prefix = "";
if (x === 0) {
/* Do nothing. */
} else if (Math.abs(x) >= 1) {
for (var i=0; i < prefixesBig.length && Math.abs(x) >= factor; ++i) {
x /= factor;
prefix = prefixesBig[i];
}
} else {
for (var i=0; i < prefixesSmall.length && Math.abs(x) < 1; ++i) {
x *= factor;
prefix = prefixesSmall[i];
}
}
return [x, prefix];
};
PromConsole.TimeControl = function() {
document.getElementById("prom_graph_duration_shrink").onclick = this.decreaseDuration.bind(this);
document.getElementById("prom_graph_duration_grow").onclick = this.increaseDuration.bind(this);
document.getElementById("prom_graph_time_back").onclick = this.decreaseEnd.bind(this);
document.getElementById("prom_graph_time_forward").onclick = this.increaseEnd.bind(this);
document.getElementById("prom_graph_refresh_button").onclick = this.refresh.bind(this);
this.durationElement = document.getElementById("prom_graph_duration");
this.endElement = document.getElementById("prom_graph_time_end");
this.durationElement.oninput = this.dispatch.bind(this);
this.endElement.oninput = this.dispatch.bind(this);
this.refreshValueElement = document.getElementById("prom_graph_refresh_button_value");
var refreshList = document.getElementById("prom_graph_refresh_intervals");
var refreshIntervals = ["Off", "1m", "5m", "15m", "1h"];
for (var i=0; i < refreshIntervals.length; ++i) {
var li = document.createElement("li");
li.onclick = this.setRefresh.bind(this, refreshIntervals[i]);
li.textContent = refreshIntervals[i];
refreshList.appendChild(li);
}
this.durationElement.value = PromConsole.TimeControl.prototype.getHumanDuration(
PromConsole.TimeControl._initialValues.duration);
if (!PromConsole.TimeControl._initialValues.endTimeNow) {
this.endElement.value = PromConsole.TimeControl.prototype.getHumanDate(
new Date(PromConsole.TimeControl._initialValues.endTime * 1000));
}
};
PromConsole.TimeControl.timeFactors = {
"y": 60 * 60 * 24 * 365,
"w": 60 * 60 * 24 * 7,
"d": 60 * 60 * 24,
"h": 60 * 60,
"m": 60,
"s": 1
};
PromConsole.TimeControl.stepValues = [
"10s", "1m", "5m", "15m", "30m", "1h", "2h", "6h", "12h", "1d", "2d",
"1w", "2w", "4w", "8w", "1y", "2y"
];
PromConsole.TimeControl.prototype._setHash = function() {
var duration = this.parseDuration(this.durationElement.value);
var endTime = this.getEndDate() / 1000;
window.location.hash = "#pctc" + encodeURIComponent(JSON.stringify(
{duration: duration, endTime: endTime}));
};
PromConsole.TimeControl._initialValues = function() {
var hash = window.location.hash;
var values;
if (hash.indexOf('#pctc') === 0) {
values = JSON.parse(decodeURIComponent(hash.substring(5)));
} else {
values = {duration: 3600, endTime: 0};
}
if (values.endTime == 0) {
values.endTime = new Date().getTime() / 1000;
values.endTimeNow = true;
}
return values;
}();
PromConsole.TimeControl.prototype.parseDuration = function(durationText) {
var durationRE = new RegExp("^([0-9]+)([ywdhms]?)$");
var matches = durationText.match(durationRE);
if (!matches) { return 3600; }
var value = parseInt(matches[1]);
var unit = matches[2] || 's';
return value * PromConsole.TimeControl.timeFactors[unit];
};
PromConsole.TimeControl.prototype.getHumanDuration = function(duration) {
var units = [];
for (var key in PromConsole.TimeControl.timeFactors) {
units.push([PromConsole.TimeControl.timeFactors[key], key]);
}
units.sort(function(a, b) { return b[0] - a[0]; });
for (var i = 0; i < units.length; ++i) {
if (duration % units[i][0] === 0) {
return (duration / units[i][0]) + units[i][1];
}
}
return duration;
};
PromConsole.TimeControl.prototype.increaseDuration = function() {
var durationSeconds = this.parseDuration(this.durationElement.value);
for (var i = 0; i < PromConsole.TimeControl.stepValues.length; i++) {
if (durationSeconds < this.parseDuration(PromConsole.TimeControl.stepValues[i])) {
this.setDuration(PromConsole.TimeControl.stepValues[i]);
this.dispatch();
return;
}
}
};
PromConsole.TimeControl.prototype.decreaseDuration = function() {
var durationSeconds = this.parseDuration(this.durationElement.value);
for (var i = PromConsole.TimeControl.stepValues.length - 1; i >= 0; i--) {
if (durationSeconds > this.parseDuration(PromConsole.TimeControl.stepValues[i])) {
this.setDuration(PromConsole.TimeControl.stepValues[i]);
this.dispatch();
return;
}
}
};
PromConsole.TimeControl.prototype.setDuration = function(duration) {
this.durationElement.value = duration;
this._setHash();
};
PromConsole.TimeControl.prototype.getEndDate = function() {
if (this.endElement.value === '') {
return null;
}
var dateParts = this.endElement.value.split(/[- :]/);
return Date.UTC(dateParts[0], dateParts[1] - 1, dateParts[2], dateParts[3], dateParts[4]);
};
PromConsole.TimeControl.prototype.getOrSetEndDate = function() {
var date = this.getEndDate();
if (date) {
return date;
}
date = new Date();
this.setEndDate(date);
return date.getTime();
};
PromConsole.TimeControl.prototype.getHumanDate = function(date) {
var hours = date.getUTCHours() < 10 ? '0' + date.getUTCHours() : date.getUTCHours();
var minutes = date.getUTCMinutes() < 10 ? '0' + date.getUTCMinutes() : date.getUTCMinutes();
return date.getUTCFullYear() + "-" + (date.getUTCMonth()+1) + "-" + date.getUTCDate() + " " +
hours + ":" + minutes;
};
PromConsole.TimeControl.prototype.setEndDate = function(date) {
this.setRefresh("Off");
this.endElement.value = this.getHumanDate(date);
this._setHash();
};
PromConsole.TimeControl.prototype.increaseEnd = function() {
// Increase the end time by a quarter of the duration (seconds converted to ms).
this.setEndDate(new Date(this.getOrSetEndDate() + this.parseDuration(this.durationElement.value) * 1000/4 ));
this.dispatch();
};
PromConsole.TimeControl.prototype.decreaseEnd = function() {
this.setEndDate(new Date(this.getOrSetEndDate() - this.parseDuration(this.durationElement.value) * 1000/4 ));
this.dispatch();
};
PromConsole.TimeControl.prototype.refresh = function() {
this.endElement.value = '';
this._setHash();
this.dispatch();
};
PromConsole.TimeControl.prototype.dispatch = function() {
var durationSeconds = this.parseDuration(this.durationElement.value);
var end = this.getEndDate();
if (end === null) {
end = new Date().getTime();
}
for (var i = 0; i< PromConsole._graph_registry.length; i++) {
var graph = PromConsole._graph_registry[i];
graph.params.duration = durationSeconds;
graph.params.endTime = end / 1000;
graph.dispatch();
}
};
PromConsole.TimeControl.prototype._refreshInterval = null;
PromConsole.TimeControl.prototype.setRefresh = function(duration) {
if (this._refreshInterval !== null) {
window.clearInterval(this._refreshInterval);
this._refreshInterval = null;
}
if (duration != "Off") {
if (this.endElement.value !== '') {
this.refresh();
}
var durationSeconds = this.parseDuration(duration);
this._refreshInterval = window.setInterval(this.dispatch.bind(this), durationSeconds * 1000);
}
this.refreshValueElement.textContent = duration;
};
// List of all graphs, used by time controls.
PromConsole._graph_registry = [];
PromConsole.graphDefaults = {
expr: null, // Expression to graph. Can be a list of strings.
node: null, // DOM node to place graph under.
// How long the graph is over, in seconds.
duration: PromConsole.TimeControl._initialValues.duration,
// The unixtime the graph ends at.
endTime: PromConsole.TimeControl._initialValues.endTime,
width: null, // Width of the graph div, excluding titles and legends.
// Defaults to auto-detection.
height: 200, // Height of the graph div, excluding titles and legends.
min: "auto", // Minimum Y-axis value, defaults to lowest data value.
max: undefined, // Maximum Y-axis value, defaults to highest data value.
renderer: 'line', // Type of graphs, options are 'line' and 'area'.
name: null, // What to call plots, defaults to trying to do
// something reasonable.
// If a string, it'll use that. [[ label ]] will be substituted.
// If a function it'll be called with a map of keys to values,
// and should return the name to use.
// Can be a list of strings/functions, each element
// will be applied to the plots from the corresponding
// element of the expr list.
xTitle: "Time", // The title of the x axis.
yUnits: "", // The units of the y axis.
yTitle: "", // The title of the y axis.
// Number formatter for y axis.
yAxisFormatter: PromConsole.NumberFormatter.humanize,
// Number of ticks (horizontal lines) for the y axis
yAxisTicks: null,
// Number formatter for y values hover detail.
yHoverFormatter: PromConsole.NumberFormatter.humanizeExact,
// Color scheme to be used by the plots. Can be either a list of hex color
// codes or one of the color scheme names supported by Rickshaw.
colorScheme: null,
};
PromConsole.Graph = function(params) {
for (var k in PromConsole.graphDefaults) {
if (!(k in params)) {
params[k] = PromConsole.graphDefaults[k];
}
}
if (typeof params.expr == "string") {
params.expr = [params.expr];
}
if (typeof params.name == "string" || typeof params.name == "function") {
var name = [];
for (var i = 0; i < params.expr.length; i++) {
name.push(params.name);
}
params.name = name;
}
this.params = params;
this.rendered_data = null;
// Keep a reference so that further updates (e.g. annotations) can be made
// by the user in their templates.
this.rickshawGraph = null;
PromConsole._graph_registry.push(this);
/*
* Table layout:
* | yTitle | Graph |
* | | xTitle |
* | /graph | Legend |
*/
var table = document.createElement("table");
table.className = "prom_graph_table";
params.node.appendChild(table);
var tr = document.createElement("tr");
table.appendChild(tr);
var yTitleTd = document.createElement("td");
tr.appendChild(yTitleTd);
var yTitleDiv = document.createElement("td");
yTitleTd.appendChild(yTitleDiv);
yTitleDiv.className = "prom_graph_ytitle";
yTitleDiv.textContent = params.yTitle + (params.yUnits ? " (" + params.yUnits.trim() + ")" : "");
this.graphTd = document.createElement("td");
tr.appendChild(this.graphTd);
this.graphTd.className = "rickshaw_graph";
this.graphTd.width = params.width;
this.graphTd.height = params.height;
tr = document.createElement("tr");
table.appendChild(tr);
tr.appendChild(document.createElement("td"));
var xTitleTd = document.createElement("td");
tr.appendChild(xTitleTd);
xTitleTd.className = "prom_graph_xtitle";
xTitleTd.textContent = params.xTitle;
tr = document.createElement("tr");
table.appendChild(tr);
var graphLinkTd = document.createElement("td");
tr.appendChild(graphLinkTd);
var graphLinkA = document.createElement("a");
graphLinkTd.appendChild(graphLinkA);
graphLinkA.className = "prom_graph_link";
graphLinkA.textContent = "+";
graphLinkA.href = PromConsole._graphsToSlashGraphURL(params.expr);
var legendTd = document.createElement("td");
tr.appendChild(legendTd);
this.legendDiv = document.createElement("div");
legendTd.width = params.width;
legendTd.appendChild(this.legendDiv);
window.addEventListener('resize', function() {
if(this.rendered_data !== null) {
this._render(this.rendered_data);
}
}.bind(this));
this.dispatch();
};
PromConsole.Graph.prototype._parseValue = function(value) {
var val = parseFloat(value);
if (isNaN(val)) {
// "+Inf", "-Inf", "+Inf" will be parsed into NaN by parseFloat(). The
// can't be graphed, so show them as gaps (null).
return null;
}
return val;
};
PromConsole.Graph.prototype._escapeHTML = function(string) {
var entityMap = {
"&": "&amp;",
"<": "&lt;",
">": "&gt;",
'"': '&quot;',
"'": '&#39;',
"/": '&#x2F;'
};
return string.replace(/[&<>"'\/]/g, function (s) {
return entityMap[s];
});
};
PromConsole.Graph.prototype._render = function(data) {
var self = this;
var palette = new Rickshaw.Color.Palette({scheme: this.params.colorScheme});
var series = [];
// This will be used on resize.
this.rendered_data = data;
var nameFuncs = [];
if (this.params.name === null) {
var chooser = PromConsole._chooseNameFunction(data);
for (var i = 0; i < this.params.expr.length; i++) {
nameFuncs.push(chooser);
}
} else {
for (var i = 0; i < this.params.name.length; i++) {
if (typeof this.params.name[i] == "string") {
nameFuncs.push(function(i, metric) {
return PromConsole._interpolateName(this.params.name[i], metric);
}.bind(this, i));
} else {
nameFuncs.push(this.params.name[i]);
}
}
}
// Get the data into the right format.
var seriesLen = 0;
for (var e = 0; e < data.length; e++) {
for (var i = 0; i < data[e].data.result.length; i++) {
series[seriesLen] = {
data: data[e].data.result[i].values.map(function(s) { return {x: s[0], y: self._parseValue(s[1])}; }),
color: palette.color(),
name: self._escapeHTML(nameFuncs[e](data[e].data.result[i].metric)),
};
// Insert nulls for all missing steps.
var newSeries = [];
var pos = 0;
var start = self.params.endTime - self.params.duration;
var step = Math.floor(self.params.duration / this.graphTd.offsetWidth * 1000) / 1000;
for (var t = start; t <= self.params.endTime; t += step) {
// Allow for floating point inaccuracy.
if (series[seriesLen].data.length > pos && series[seriesLen].data[pos].x < t + step / 100) {
newSeries.push(series[seriesLen].data[pos]);
pos++;
} else {
newSeries.push({x: t, y: null});
}
}
series[seriesLen].data = newSeries;
seriesLen++;
}
}
this._clearGraph();
if (!series.length) {
var errorText = document.createElement("div");
errorText.className = 'prom_graph_error';
errorText.textContent = 'No timeseries returned';
this.graphTd.appendChild(errorText);
return;
}
// Render.
var graph = new Rickshaw.Graph({
interpolation: "linear",
width: this.graphTd.offsetWidth,
height: this.params.height,
element: this.graphTd,
renderer: this.params.renderer,
max: this.params.max,
min: this.params.min,
series: series
});
var hoverDetail = new Rickshaw.Graph.HoverDetail({
graph: graph,
onRender: function() {
var xLabel = this.element.getElementsByClassName("x_label")[0];
var item = this.element.getElementsByClassName("item")[0];
if (xLabel.offsetWidth + xLabel.offsetLeft + this.element.offsetLeft > graph.element.offsetWidth ||
item.offsetWidth + item.offsetLeft + this.element.offsetLeft > graph.element.offsetWidth) {
xLabel.classList.add("prom_graph_hover_flipped");
item.classList.add("prom_graph_hover_flipped");
} else {
xLabel.classList.remove("prom_graph_hover_flipped");
item.classList.remove("prom_graph_hover_flipped");
}
},
yFormatter: function(y) {
if (y === null) {
return "";
}
return this.params.yHoverFormatter(y) + this.params.yUnits;
}.bind(this)
});
var yAxis = new Rickshaw.Graph.Axis.Y({
graph: graph,
tickFormat: this.params.yAxisFormatter,
ticks: this.params.yAxisTicks
});
var xAxis = new Rickshaw.Graph.Axis.Time({
graph: graph,
});
var legend = new Rickshaw.Graph.Legend({
graph: graph,
element: this.legendDiv
});
xAxis.render();
yAxis.render();
graph.render();
this.rickshawGraph = graph;
};
PromConsole.Graph.prototype._clearGraph = function() {
while (this.graphTd.lastChild) {
this.graphTd.removeChild(this.graphTd.lastChild);
}
while (this.legendDiv.lastChild) {
this.legendDiv.removeChild(this.legendDiv.lastChild);
}
this.rickshawGraph = null;
};
PromConsole.Graph.prototype._xhrs = [];
PromConsole.Graph.prototype.buildQueryUrl = function(expr) {
var p = this.params;
return PATH_PREFIX + "/api/v1/query_range?query=" +
encodeURIComponent(expr) +
"&step=" + Math.floor(p.duration / this.graphTd.offsetWidth * 1000) / 1000 +
"&start=" + (p.endTime - p.duration) + "&end=" + p.endTime;
};
PromConsole.Graph.prototype.dispatch = function() {
for (var j = 0; j < this._xhrs.length; j++) {
this._xhrs[j].abort();
}
var all_data = new Array(this.params.expr.length);
this._xhrs = new Array(this.params.expr.length);
var pending_requests = this.params.expr.length;
for (var i = 0; i < this.params.expr.length; ++i) {
var endTime = this.params.endTime;
var url = this.buildQueryUrl(this.params.expr[i]);
var xhr = new XMLHttpRequest();
xhr.open('get', url, true);
xhr.responseType = 'json';
xhr.onerror = function(xhr, i) {
this._clearGraph();
var errorText = document.createElement("div");
errorText.className = 'prom_graph_error';
errorText.textContent = 'Error loading data';
this.graphTd.appendChild(errorText);
console.log('Error loading data for ' + this.params.expr[i]);
pending_requests = 0;
// onabort gets any aborts.
for (var j = 0; j < this._xhrs.length; j++) {
this._xhrs[j].abort();
}
}.bind(this, xhr, i);
xhr.onload = function(xhr, i) {
if (pending_requests === 0) {
// Got an error before this success.
return;
}
var data = xhr.response;
if (typeof data !== "object") {
data = JSON.parse(xhr.responseText);
}
pending_requests -= 1;
all_data[i] = data;
if (pending_requests === 0) {
this._xhrs = [];
this._render(all_data);
}
}.bind(this, xhr, i);
xhr.send();
this._xhrs[i] = xhr;
}
var loadingImg = document.createElement("img");
loadingImg.src = PATH_PREFIX + '/classic/static/img/ajax-loader.gif';
loadingImg.alt = 'Loading...';
loadingImg.className = 'prom_graph_loading';
this.graphTd.appendChild(loadingImg);
};
// Substitute the value of 'label' for [[ label ]].
PromConsole._interpolateName = function(name, metric) {
var re = /(.*?)\[\[\s*(\w+)+\s*\]\](.*?)/g;
var result = '';
while (match = re.exec(name)) {
result = result + match[1] + metric[match[2]] + match[3];
}
if (!result) {
return name;
}
return result;
};
// Given the data returned by the API, return an appropriate function
// to return plot names.
PromConsole._chooseNameFunction = function(data) {
// By default, use the full metric name.
var nameFunc = function (metric) {
var name = metric.__name__ + "{";
for (var label in metric) {
if (label.substring(0,2) == "__") {
continue;
}
name += label + "='" + metric[label] + "',";
}
return name + "}";
};
// If only one label varies, use that value.
var labelValues = {};
for (var e = 0; e < data.length; e++) {
for (var i = 0; i < data[e].data.result.length; i++) {
for (var label in data[e].data.result[i].metric) {
if (!(label in labelValues)) {
labelValues[label] = {};
}
labelValues[label][data[e].data.result[i].metric[label]] = 1;
}
}
}
var multiValueLabels = [];
for (var label in labelValues) {
if (Object.keys(labelValues[label]).length > 1) {
multiValueLabels.push(label);
}
}
if (multiValueLabels.length == 1) {
nameFunc = function(metric) {
return metric[multiValueLabels[0]];
};
}
return nameFunc;
};
// Given a list of expressions, produce the /graph url for them.
PromConsole._graphsToSlashGraphURL = function(exprs) {
var data = [];
for (var i = 0; i < exprs.length; ++i) {
data.push({'expr': exprs[i], 'tab': 0});
}
return PATH_PREFIX + '/graph#' + encodeURIComponent(JSON.stringify(data));
};

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,325 +0,0 @@
/*!
* Bootstrap Reboot v4.5.2 (https://getbootstrap.com/)
* Copyright 2011-2020 The Bootstrap Authors
* Copyright 2011-2020 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
* Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
*/
*,
*::before,
*::after {
box-sizing: border-box;
}
html {
font-family: sans-serif;
line-height: 1.15;
-webkit-text-size-adjust: 100%;
-webkit-tap-highlight-color: rgba(0, 0, 0, 0);
}
article, aside, figcaption, figure, footer, header, hgroup, main, nav, section {
display: block;
}
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
font-size: 1rem;
font-weight: 400;
line-height: 1.5;
color: #212529;
text-align: left;
background-color: #fff;
}
[tabindex="-1"]:focus:not(:focus-visible) {
outline: 0 !important;
}
hr {
box-sizing: content-box;
height: 0;
overflow: visible;
}
h1, h2, h3, h4, h5, h6 {
margin-top: 0;
margin-bottom: 0.5rem;
}
p {
margin-top: 0;
margin-bottom: 1rem;
}
abbr[title],
abbr[data-original-title] {
text-decoration: underline;
-webkit-text-decoration: underline dotted;
text-decoration: underline dotted;
cursor: help;
border-bottom: 0;
-webkit-text-decoration-skip-ink: none;
text-decoration-skip-ink: none;
}
address {
margin-bottom: 1rem;
font-style: normal;
line-height: inherit;
}
ol,
ul,
dl {
margin-top: 0;
margin-bottom: 1rem;
}
ol ol,
ul ul,
ol ul,
ul ol {
margin-bottom: 0;
}
dt {
font-weight: 700;
}
dd {
margin-bottom: .5rem;
margin-left: 0;
}
blockquote {
margin: 0 0 1rem;
}
b,
strong {
font-weight: bolder;
}
small {
font-size: 80%;
}
sub,
sup {
position: relative;
font-size: 75%;
line-height: 0;
vertical-align: baseline;
}
sub {
bottom: -.25em;
}
sup {
top: -.5em;
}
a {
color: #007bff;
text-decoration: none;
background-color: transparent;
}
a:hover {
color: #0056b3;
text-decoration: underline;
}
a:not([href]):not([class]) {
color: inherit;
text-decoration: none;
}
a:not([href]):not([class]):hover {
color: inherit;
text-decoration: none;
}
pre,
code,
kbd,
samp {
font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
font-size: 1em;
}
pre {
margin-top: 0;
margin-bottom: 1rem;
overflow: auto;
-ms-overflow-style: scrollbar;
}
figure {
margin: 0 0 1rem;
}
img {
vertical-align: middle;
border-style: none;
}
svg {
overflow: hidden;
vertical-align: middle;
}
table {
border-collapse: collapse;
}
caption {
padding-top: 0.75rem;
padding-bottom: 0.75rem;
color: #6c757d;
text-align: left;
caption-side: bottom;
}
th {
text-align: inherit;
}
label {
display: inline-block;
margin-bottom: 0.5rem;
}
button {
border-radius: 0;
}
button:focus {
outline: 1px dotted;
outline: 5px auto -webkit-focus-ring-color;
}
input,
button,
select,
optgroup,
textarea {
margin: 0;
font-family: inherit;
font-size: inherit;
line-height: inherit;
}
button,
input {
overflow: visible;
}
button,
select {
text-transform: none;
}
[role="button"] {
cursor: pointer;
}
select {
word-wrap: normal;
}
button,
[type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button;
}
button:not(:disabled),
[type="button"]:not(:disabled),
[type="reset"]:not(:disabled),
[type="submit"]:not(:disabled) {
cursor: pointer;
}
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
padding: 0;
border-style: none;
}
input[type="radio"],
input[type="checkbox"] {
box-sizing: border-box;
padding: 0;
}
textarea {
overflow: auto;
resize: vertical;
}
fieldset {
min-width: 0;
padding: 0;
margin: 0;
border: 0;
}
legend {
display: block;
width: 100%;
max-width: 100%;
padding: 0;
margin-bottom: .5rem;
font-size: 1.5rem;
line-height: inherit;
color: inherit;
white-space: normal;
}
progress {
vertical-align: baseline;
}
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto;
}
[type="search"] {
outline-offset: -2px;
-webkit-appearance: none;
}
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
::-webkit-file-upload-button {
font: inherit;
-webkit-appearance: button;
}
output {
display: inline-block;
}
summary {
display: list-item;
cursor: pointer;
}
template {
display: none;
}
[hidden] {
display: none !important;
}
/*# sourceMappingURL=bootstrap-reboot.css.map */

File diff suppressed because one or more lines are too long


@ -1,8 +0,0 @@
/*!
* Bootstrap Reboot v4.5.2 (https://getbootstrap.com/)
* Copyright 2011-2020 The Bootstrap Authors
* Copyright 2011-2020 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
* Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
*/*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([class]){color:inherit;text-decoration:none}a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}
/*# sourceMappingURL=bootstrap-reboot.min.css.map */

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,809 +0,0 @@
/*!
* Bootstrap v3.3.7 (http://getbootstrap.com)
* Copyright 2011-2018 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
*/
@font-face {
font-family: 'Glyphicons Halflings';
src: url('../fonts/glyphicons/glyphicons-halflings-regular.eot');
src: url('../fonts/glyphicons/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
}
.glyphicon {
position: relative;
top: 1px;
display: inline-block;
font-family: 'Glyphicons Halflings';
font-style: normal;
font-weight: normal;
line-height: 1;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
.glyphicon-asterisk:before {
content: "\002a";
}
.glyphicon-plus:before {
content: "\002b";
}
.glyphicon-euro:before,
.glyphicon-eur:before {
content: "\20ac";
}
.glyphicon-minus:before {
content: "\2212";
}
.glyphicon-cloud:before {
content: "\2601";
}
.glyphicon-envelope:before {
content: "\2709";
}
.glyphicon-pencil:before {
content: "\270f";
}
.glyphicon-glass:before {
content: "\e001";
}
.glyphicon-music:before {
content: "\e002";
}
.glyphicon-search:before {
content: "\e003";
}
.glyphicon-heart:before {
content: "\e005";
}
.glyphicon-star:before {
content: "\e006";
}
.glyphicon-star-empty:before {
content: "\e007";
}
.glyphicon-user:before {
content: "\e008";
}
.glyphicon-film:before {
content: "\e009";
}
.glyphicon-th-large:before {
content: "\e010";
}
.glyphicon-th:before {
content: "\e011";
}
.glyphicon-th-list:before {
content: "\e012";
}
.glyphicon-ok:before {
content: "\e013";
}
.glyphicon-remove:before {
content: "\e014";
}
.glyphicon-zoom-in:before {
content: "\e015";
}
.glyphicon-zoom-out:before {
content: "\e016";
}
.glyphicon-off:before {
content: "\e017";
}
.glyphicon-signal:before {
content: "\e018";
}
.glyphicon-cog:before {
content: "\e019";
}
.glyphicon-trash:before {
content: "\e020";
}
.glyphicon-home:before {
content: "\e021";
}
.glyphicon-file:before {
content: "\e022";
}
.glyphicon-time:before {
content: "\e023";
}
.glyphicon-road:before {
content: "\e024";
}
.glyphicon-download-alt:before {
content: "\e025";
}
.glyphicon-download:before {
content: "\e026";
}
.glyphicon-upload:before {
content: "\e027";
}
.glyphicon-inbox:before {
content: "\e028";
}
.glyphicon-play-circle:before {
content: "\e029";
}
.glyphicon-repeat:before {
content: "\e030";
}
.glyphicon-refresh:before {
content: "\e031";
}
.glyphicon-list-alt:before {
content: "\e032";
}
.glyphicon-lock:before {
content: "\e033";
}
.glyphicon-flag:before {
content: "\e034";
}
.glyphicon-headphones:before {
content: "\e035";
}
.glyphicon-volume-off:before {
content: "\e036";
}
.glyphicon-volume-down:before {
content: "\e037";
}
.glyphicon-volume-up:before {
content: "\e038";
}
.glyphicon-qrcode:before {
content: "\e039";
}
.glyphicon-barcode:before {
content: "\e040";
}
.glyphicon-tag:before {
content: "\e041";
}
.glyphicon-tags:before {
content: "\e042";
}
.glyphicon-book:before {
content: "\e043";
}
.glyphicon-bookmark:before {
content: "\e044";
}
.glyphicon-print:before {
content: "\e045";
}
.glyphicon-camera:before {
content: "\e046";
}
.glyphicon-font:before {
content: "\e047";
}
.glyphicon-bold:before {
content: "\e048";
}
.glyphicon-italic:before {
content: "\e049";
}
.glyphicon-text-height:before {
content: "\e050";
}
.glyphicon-text-width:before {
content: "\e051";
}
.glyphicon-align-left:before {
content: "\e052";
}
.glyphicon-align-center:before {
content: "\e053";
}
.glyphicon-align-right:before {
content: "\e054";
}
.glyphicon-align-justify:before {
content: "\e055";
}
.glyphicon-list:before {
content: "\e056";
}
.glyphicon-indent-left:before {
content: "\e057";
}
.glyphicon-indent-right:before {
content: "\e058";
}
.glyphicon-facetime-video:before {
content: "\e059";
}
.glyphicon-picture:before {
content: "\e060";
}
.glyphicon-map-marker:before {
content: "\e062";
}
.glyphicon-adjust:before {
content: "\e063";
}
.glyphicon-tint:before {
content: "\e064";
}
.glyphicon-edit:before {
content: "\e065";
}
.glyphicon-share:before {
content: "\e066";
}
.glyphicon-check:before {
content: "\e067";
}
.glyphicon-move:before {
content: "\e068";
}
.glyphicon-step-backward:before {
content: "\e069";
}
.glyphicon-fast-backward:before {
content: "\e070";
}
.glyphicon-backward:before {
content: "\e071";
}
.glyphicon-play:before {
content: "\e072";
}
.glyphicon-pause:before {
content: "\e073";
}
.glyphicon-stop:before {
content: "\e074";
}
.glyphicon-forward:before {
content: "\e075";
}
.glyphicon-fast-forward:before {
content: "\e076";
}
.glyphicon-step-forward:before {
content: "\e077";
}
.glyphicon-eject:before {
content: "\e078";
}
.glyphicon-chevron-left:before {
content: "\e079";
}
.glyphicon-chevron-right:before {
content: "\e080";
}
.glyphicon-plus-sign:before {
content: "\e081";
}
.glyphicon-minus-sign:before {
content: "\e082";
}
.glyphicon-remove-sign:before {
content: "\e083";
}
.glyphicon-ok-sign:before {
content: "\e084";
}
.glyphicon-question-sign:before {
content: "\e085";
}
.glyphicon-info-sign:before {
content: "\e086";
}
.glyphicon-screenshot:before {
content: "\e087";
}
.glyphicon-remove-circle:before {
content: "\e088";
}
.glyphicon-ok-circle:before {
content: "\e089";
}
.glyphicon-ban-circle:before {
content: "\e090";
}
.glyphicon-arrow-left:before {
content: "\e091";
}
.glyphicon-arrow-right:before {
content: "\e092";
}
.glyphicon-arrow-up:before {
content: "\e093";
}
.glyphicon-arrow-down:before {
content: "\e094";
}
.glyphicon-share-alt:before {
content: "\e095";
}
.glyphicon-resize-full:before {
content: "\e096";
}
.glyphicon-resize-small:before {
content: "\e097";
}
.glyphicon-exclamation-sign:before {
content: "\e101";
}
.glyphicon-gift:before {
content: "\e102";
}
.glyphicon-leaf:before {
content: "\e103";
}
.glyphicon-fire:before {
content: "\e104";
}
.glyphicon-eye-open:before {
content: "\e105";
}
.glyphicon-eye-close:before {
content: "\e106";
}
.glyphicon-warning-sign:before {
content: "\e107";
}
.glyphicon-plane:before {
content: "\e108";
}
.glyphicon-calendar:before {
content: "\e109";
}
.glyphicon-random:before {
content: "\e110";
}
.glyphicon-comment:before {
content: "\e111";
}
.glyphicon-magnet:before {
content: "\e112";
}
.glyphicon-chevron-up:before {
content: "\e113";
}
.glyphicon-chevron-down:before {
content: "\e114";
}
.glyphicon-retweet:before {
content: "\e115";
}
.glyphicon-shopping-cart:before {
content: "\e116";
}
.glyphicon-folder-close:before {
content: "\e117";
}
.glyphicon-folder-open:before {
content: "\e118";
}
.glyphicon-resize-vertical:before {
content: "\e119";
}
.glyphicon-resize-horizontal:before {
content: "\e120";
}
.glyphicon-hdd:before {
content: "\e121";
}
.glyphicon-bullhorn:before {
content: "\e122";
}
.glyphicon-bell:before {
content: "\e123";
}
.glyphicon-certificate:before {
content: "\e124";
}
.glyphicon-thumbs-up:before {
content: "\e125";
}
.glyphicon-thumbs-down:before {
content: "\e126";
}
.glyphicon-hand-right:before {
content: "\e127";
}
.glyphicon-hand-left:before {
content: "\e128";
}
.glyphicon-hand-up:before {
content: "\e129";
}
.glyphicon-hand-down:before {
content: "\e130";
}
.glyphicon-circle-arrow-right:before {
content: "\e131";
}
.glyphicon-circle-arrow-left:before {
content: "\e132";
}
.glyphicon-circle-arrow-up:before {
content: "\e133";
}
.glyphicon-circle-arrow-down:before {
content: "\e134";
}
.glyphicon-globe:before {
content: "\e135";
}
.glyphicon-wrench:before {
content: "\e136";
}
.glyphicon-tasks:before {
content: "\e137";
}
.glyphicon-filter:before {
content: "\e138";
}
.glyphicon-briefcase:before {
content: "\e139";
}
.glyphicon-fullscreen:before {
content: "\e140";
}
.glyphicon-dashboard:before {
content: "\e141";
}
.glyphicon-paperclip:before {
content: "\e142";
}
.glyphicon-heart-empty:before {
content: "\e143";
}
.glyphicon-link:before {
content: "\e144";
}
.glyphicon-phone:before {
content: "\e145";
}
.glyphicon-pushpin:before {
content: "\e146";
}
.glyphicon-usd:before {
content: "\e148";
}
.glyphicon-gbp:before {
content: "\e149";
}
.glyphicon-sort:before {
content: "\e150";
}
.glyphicon-sort-by-alphabet:before {
content: "\e151";
}
.glyphicon-sort-by-alphabet-alt:before {
content: "\e152";
}
.glyphicon-sort-by-order:before {
content: "\e153";
}
.glyphicon-sort-by-order-alt:before {
content: "\e154";
}
.glyphicon-sort-by-attributes:before {
content: "\e155";
}
.glyphicon-sort-by-attributes-alt:before {
content: "\e156";
}
.glyphicon-unchecked:before {
content: "\e157";
}
.glyphicon-expand:before {
content: "\e158";
}
.glyphicon-collapse-down:before {
content: "\e159";
}
.glyphicon-collapse-up:before {
content: "\e160";
}
.glyphicon-log-in:before {
content: "\e161";
}
.glyphicon-flash:before {
content: "\e162";
}
.glyphicon-log-out:before {
content: "\e163";
}
.glyphicon-new-window:before {
content: "\e164";
}
.glyphicon-record:before {
content: "\e165";
}
.glyphicon-save:before {
content: "\e166";
}
.glyphicon-open:before {
content: "\e167";
}
.glyphicon-saved:before {
content: "\e168";
}
.glyphicon-import:before {
content: "\e169";
}
.glyphicon-export:before {
content: "\e170";
}
.glyphicon-send:before {
content: "\e171";
}
.glyphicon-floppy-disk:before {
content: "\e172";
}
.glyphicon-floppy-saved:before {
content: "\e173";
}
.glyphicon-floppy-remove:before {
content: "\e174";
}
.glyphicon-floppy-save:before {
content: "\e175";
}
.glyphicon-floppy-open:before {
content: "\e176";
}
.glyphicon-credit-card:before {
content: "\e177";
}
.glyphicon-transfer:before {
content: "\e178";
}
.glyphicon-cutlery:before {
content: "\e179";
}
.glyphicon-header:before {
content: "\e180";
}
.glyphicon-compressed:before {
content: "\e181";
}
.glyphicon-earphone:before {
content: "\e182";
}
.glyphicon-phone-alt:before {
content: "\e183";
}
.glyphicon-tower:before {
content: "\e184";
}
.glyphicon-stats:before {
content: "\e185";
}
.glyphicon-sd-video:before {
content: "\e186";
}
.glyphicon-hd-video:before {
content: "\e187";
}
.glyphicon-subtitles:before {
content: "\e188";
}
.glyphicon-sound-stereo:before {
content: "\e189";
}
.glyphicon-sound-dolby:before {
content: "\e190";
}
.glyphicon-sound-5-1:before {
content: "\e191";
}
.glyphicon-sound-6-1:before {
content: "\e192";
}
.glyphicon-sound-7-1:before {
content: "\e193";
}
.glyphicon-copyright-mark:before {
content: "\e194";
}
.glyphicon-registration-mark:before {
content: "\e195";
}
.glyphicon-cloud-download:before {
content: "\e197";
}
.glyphicon-cloud-upload:before {
content: "\e198";
}
.glyphicon-tree-conifer:before {
content: "\e199";
}
.glyphicon-tree-deciduous:before {
content: "\e200";
}
.glyphicon-cd:before {
content: "\e201";
}
.glyphicon-save-file:before {
content: "\e202";
}
.glyphicon-open-file:before {
content: "\e203";
}
.glyphicon-level-up:before {
content: "\e204";
}
.glyphicon-copy:before {
content: "\e205";
}
.glyphicon-paste:before {
content: "\e206";
}
.glyphicon-alert:before {
content: "\e209";
}
.glyphicon-equalizer:before {
content: "\e210";
}
.glyphicon-king:before {
content: "\e211";
}
.glyphicon-queen:before {
content: "\e212";
}
.glyphicon-pawn:before {
content: "\e213";
}
.glyphicon-bishop:before {
content: "\e214";
}
.glyphicon-knight:before {
content: "\e215";
}
.glyphicon-baby-formula:before {
content: "\e216";
}
.glyphicon-tent:before {
content: "\26fa";
}
.glyphicon-blackboard:before {
content: "\e218";
}
.glyphicon-bed:before {
content: "\e219";
}
.glyphicon-apple:before {
content: "\f8ff";
}
.glyphicon-erase:before {
content: "\e221";
}
.glyphicon-hourglass:before {
content: "\231b";
}
.glyphicon-lamp:before {
content: "\e223";
}
.glyphicon-duplicate:before {
content: "\e224";
}
.glyphicon-piggy-bank:before {
content: "\e225";
}
.glyphicon-scissors:before {
content: "\e226";
}
.glyphicon-bitcoin:before {
content: "\e227";
}
.glyphicon-btc:before {
content: "\e227";
}
.glyphicon-xbt:before {
content: "\e227";
}
.glyphicon-yen:before {
content: "\00a5";
}
.glyphicon-jpy:before {
content: "\00a5";
}
.glyphicon-ruble:before {
content: "\20bd";
}
.glyphicon-rub:before {
content: "\20bd";
}
.glyphicon-scale:before {
content: "\e230";
}
.glyphicon-ice-lolly:before {
content: "\e231";
}
.glyphicon-ice-lolly-tasted:before {
content: "\e232";
}
.glyphicon-education:before {
content: "\e233";
}
.glyphicon-option-horizontal:before {
content: "\e234";
}
.glyphicon-option-vertical:before {
content: "\e235";
}
.glyphicon-menu-hamburger:before {
content: "\e236";
}
.glyphicon-modal-window:before {
content: "\e237";
}
.glyphicon-oil:before {
content: "\e238";
}
.glyphicon-grain:before {
content: "\e239";
}
.glyphicon-sunglasses:before {
content: "\e240";
}
.glyphicon-text-size:before {
content: "\e241";
}
.glyphicon-text-color:before {
content: "\e242";
}
.glyphicon-text-background:before {
content: "\e243";
}
.glyphicon-object-align-top:before {
content: "\e244";
}
.glyphicon-object-align-bottom:before {
content: "\e245";
}
.glyphicon-object-align-horizontal:before {
content: "\e246";
}
.glyphicon-object-align-left:before {
content: "\e247";
}
.glyphicon-object-align-vertical:before {
content: "\e248";
}
.glyphicon-object-align-right:before {
content: "\e249";
}
.glyphicon-triangle-right:before {
content: "\e250";
}
.glyphicon-triangle-left:before {
content: "\e251";
}
.glyphicon-triangle-bottom:before {
content: "\e252";
}
.glyphicon-triangle-top:before {
content: "\e253";
}
.glyphicon-console:before {
content: "\e254";
}
.glyphicon-superscript:before {
content: "\e255";
}
.glyphicon-subscript:before {
content: "\e256";
}
.glyphicon-menu-left:before {
content: "\e257";
}
.glyphicon-menu-right:before {
content: "\e258";
}
.glyphicon-menu-down:before {
content: "\e259";
}
.glyphicon-menu-up:before {
content: "\e260";
}

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff