Merge remote-tracking branch 'prometheus/main' into arve/close-engine
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
This commit is contained in: commit 5dfbcc390e
.github/stale.yml (vendored) | 56 deletions
@@ -1,56 +0,0 @@
-# Configuration for probot-stale - https://github.com/probot/stale
-
-# Number of days of inactivity before an Issue or Pull Request becomes stale
-daysUntilStale: 60
-
-# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
-# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
-daysUntilClose: false
-
-# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
-onlyLabels: []
-
-# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
-exemptLabels:
-  - keepalive
-
-# Set to true to ignore issues in a project (defaults to false)
-exemptProjects: false
-
-# Set to true to ignore issues in a milestone (defaults to false)
-exemptMilestones: false
-
-# Set to true to ignore issues with an assignee (defaults to false)
-exemptAssignees: false
-
-# Label to use when marking as stale
-staleLabel: stale
-
-# Comment to post when marking as stale. Set to `false` to disable
-markComment: false
-
-# Comment to post when removing the stale label.
-# unmarkComment: >
-#   Your comment here.
-
-# Comment to post when closing a stale Issue or Pull Request.
-# closeComment: >
-#   Your comment here.
-
-# Limit the number of actions per hour, from 1-30. Default is 30
-limitPerRun: 30
-
-# Limit to only `issues` or `pulls`
-only: pulls
-
-# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
-# pulls:
-#   daysUntilStale: 30
-#   markComment: >
-#     This pull request has been automatically marked as stale because it has not had
-#     recent activity. It will be closed if no further activity occurs. Thank you
-#     for your contributions.
-
-# issues:
-#   exemptLabels:
-#     - confirmed
.github/workflows/stale.yml (vendored, new file) | 31 additions
@@ -0,0 +1,31 @@
+name: Stale Check
+on:
+  workflow_dispatch: {}
+  schedule:
+  - cron: '16 22 * * *'
+permissions:
+  issues: write
+  pull-requests: write
+jobs:
+  stale:
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+      with:
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+        # opt out of defaults to avoid marking issues as stale and closing them
+        # https://github.com/actions/stale#days-before-close
+        # https://github.com/actions/stale#days-before-stale
+        days-before-stale: -1
+        days-before-close: -1
+        # Setting it to empty string to skip comments.
+        # https://github.com/actions/stale#stale-pr-message
+        # https://github.com/actions/stale#stale-issue-message
+        stale-pr-message: ''
+        stale-issue-message: ''
+        operations-per-run: 30
+        # override days-before-stale, for only marking the pull requests as stale
+        days-before-pr-stale: 60
+        stale-pr-label: stale
+        exempt-pr-labels: keepalive
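Note that besides the daily cron schedule, the `workflow_dispatch: {}` trigger above allows running the check by hand. A minimal sketch, assuming an authenticated GitHub CLI:

    gh workflow run stale.yml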
cmd/prometheus/main.go
@@ -169,6 +169,8 @@ type flagConfig struct {
 	corsRegexString string

 	promlogConfig promlog.Config
+
+	promqlEnableDelayedNameRemoval bool
 }

 // setFeatureListOptions sets the corresponding options from the featureList.
@@ -238,6 +240,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "delayed-compaction":
 			c.tsdb.EnableDelayedCompaction = true
 			level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+		case "promql-delayed-name-removal":
+			c.promqlEnableDelayedNameRemoval = true
+			level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
 		case "utf8-names":
 			model.NameValidationScheme = model.UTF8Validation
 			level.Info(logger).Log("msg", "Experimental UTF-8 support enabled")
@@ -487,7 +492,7 @@ func main() {
 	a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -799,9 +804,10 @@ func main() {
 		NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
 		// EnableAtModifier and EnableNegativeOffset have to be
 		// always on for regular PromQL as of Prometheus v2.33.
-		EnableAtModifier:     true,
-		EnableNegativeOffset: true,
-		EnablePerStepStats:   cfg.enablePerStepStats,
+		EnableAtModifier:         true,
+		EnableNegativeOffset:     true,
+		EnablePerStepStats:       cfg.enablePerStepStats,
+		EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
 	}

 	queryEngine = promql.NewEngine(opts)
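Taken together, these hunks register the new feature flag name and thread it through to the engine options. A minimal invocation sketch (only the new flag shown; combine with other flags as usual):

    ./prometheus --enable-feature=promql-delayed-name-removal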
discovery/manager.go
@@ -394,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
 		m.targets[poolKey] = make(map[string]*targetgroup.Group)
 	}
 	for _, tg := range tgs {
-		if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
+		// Some Discoverers send nil target group so need to check for it to avoid panics.
+		if tg == nil {
+			continue
+		}
+		if len(tg.Targets) > 0 {
 			m.targets[poolKey][tg.Source] = tg
+		} else {
+			// The target group is empty, drop the corresponding entry to avoid leaks.
+			// In case the group yielded targets before, allGroups() will take care of making consumers drop them.
+			delete(m.targets[poolKey], tg.Source)
 		}
 	}
 }
discovery/manager_test.go
@@ -1051,8 +1051,8 @@ func TestDiscovererConfigs(t *testing.T) {
 }

 // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after
-// removing all targets from the static_configs sends an update with empty targetGroups.
-// This is required to signal the receiver that this target set has no current targets.
+// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update.
+// The update is required to signal the consumers that the previous targets should be dropped.
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -1085,16 +1085,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	discoveryManager.ApplyConfig(c)

 	syncedTargets = <-discoveryManager.SyncCh()
 	require.Len(t, discoveryManager.targets, 1)
 	p = pk("static", "prometheus", 1)
 	targetGroups, ok := discoveryManager.targets[p]
-	require.True(t, ok, "'%v' should be present in target groups", p)
-	group, ok := targetGroups[""]
-	require.True(t, ok, "missing '' key in target groups %v", targetGroups)
-
-	require.Empty(t, group.Targets, "Invalid number of targets.")
-	require.Len(t, syncedTargets, 1)
-	require.Len(t, syncedTargets["prometheus"], 1)
-	require.Nil(t, syncedTargets["prometheus"][0].Labels)
+	require.True(t, ok, "'%v' should be present in targets", p)
+	// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
+	require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
+	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
+	require.Empty(t, syncedTargets["prometheus"], 0)
 }

 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
docs/command-line/prometheus.md
@@ -57,7 +57,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
 | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
 | <code class="text-nowrap">--scrape.name-escaping-scheme</code> | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
 | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
 | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
docs/feature_flags.md
@@ -250,6 +250,14 @@ Note that during this delay, the Head continues its usual operations, which incl

 Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place.

+## Delay __name__ label removal for PromQL engine
+
+`--enable-feature=promql-delayed-name-removal`
+
+When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated.
+
+This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label.
+
 ## UTF-8 Name Support

 `--enable-feature=utf8-names`
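The `label_replace` interaction described in the new documentation section is exercised by the promqltest file added later in this commit. For example, with the flag enabled, the metric name survives `rate()` and can be rewritten (example copied from that test file):

    eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
        rate_metric{env="1"} 0.2
        rate_another_metric{env="1"} 0.2

Without delayed removal, `rate()` drops `__name__` eagerly, so there is nothing left for the `__name__` matcher to capture.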
docs/querying/basics.md
@@ -41,7 +41,7 @@ vector is the only type which can be graphed.
 _Notes about the experimental native histograms:_

 * Ingesting native histograms has to be enabled via a [feature
-  flag](../../feature_flags.md#native-histograms).
+  flag](../feature_flags.md#native-histograms).
 * Once native histograms have been ingested into the TSDB (and even after
   disabling the feature flag again), both instant vectors and range vectors may
   now contain samples that aren't simple floating point numbers (float samples)
go.mod | 4 changes
@@ -36,9 +36,9 @@ require (
 	github.com/gophercloud/gophercloud v1.14.0
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.29.2
+	github.com/hashicorp/consul/api v1.29.4
 	github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
-	github.com/hetznercloud/hcloud-go/v2 v2.12.0
+	github.com/hetznercloud/hcloud-go/v2 v2.13.1
 	github.com/ionos-cloud/sdk-go/v6 v6.2.0
 	github.com/json-iterator/go v1.1.12
 	github.com/klauspost/compress v1.17.9
go.sum | 8 changes
@@ -353,8 +353,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
-github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
+github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
 github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
 github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY=
-github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
+github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
promql/engine.go | 124 changes
@@ -316,6 +316,11 @@ type EngineOpts struct {

 	// EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise.
 	EnablePerStepStats bool
+
+	// EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation.
+	// This is useful in certain scenarios where the __name__ label must be preserved or where applying a
+	// regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors.
+	EnableDelayedNameRemoval bool
 }

 // Engine handles the lifetime of queries from beginning to end.
@@ -333,6 +338,7 @@ type Engine struct {
 	enableAtModifier         bool
 	enableNegativeOffset     bool
 	enablePerStepStats       bool
+	enableDelayedNameRemoval bool
 }

 // NewEngine returns a new engine.
@@ -423,6 +429,7 @@ func NewEngine(opts EngineOpts) *Engine {
 		enableAtModifier:         opts.EnableAtModifier,
 		enableNegativeOffset:     opts.EnableNegativeOffset,
 		enablePerStepStats:       opts.EnablePerStepStats,
+		enableDelayedNameRemoval: opts.EnableDelayedNameRemoval,
 	}
 }

@@ -723,6 +730,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 			lookbackDelta:            s.LookbackDelta,
 			samplesStats:             query.sampleStats,
 			noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
 		}
 		query.sampleStats.InitStepTracking(start, start, 1)

@@ -754,9 +762,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 			// Point might have a different timestamp, force it to the evaluation
 			// timestamp as that is when we ran the evaluation.
 			if len(s.Histograms) > 0 {
-				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start}
+				vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName}
 			} else {
-				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start}
+				vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName}
 			}
 		}
 		return vector, warnings, nil
@@ -781,6 +789,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 		lookbackDelta:            s.LookbackDelta,
 		samplesStats:             query.sampleStats,
 		noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
+		enableDelayedNameRemoval: ng.enableDelayedNameRemoval,
 	}
 	query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
 	val, warnings, err := evaluator.Eval(s.Expr)

@@ -1043,6 +1052,7 @@ type evaluator struct {
 	lookbackDelta            time.Duration
 	samplesStats             *stats.QuerySamples
 	noStepSubqueryIntervalFn func(rangeMillis int64) int64
+	enableDelayedNameRemoval bool
 }

 // errorf causes a panic with the input formatted into an error.
@@ -1084,6 +1094,9 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Anno
 	defer ev.recover(expr, &ws, &err)

 	v, ws = ev.eval(expr)
+	if ev.enableDelayedNameRemoval {
+		ev.cleanupMetricLabels(v)
+	}
 	return v, ws, nil
 }

@@ -1112,6 +1125,9 @@ type EvalNodeHelper struct {
 	rightSigs    map[string]Sample
 	matchedSigs  map[string]map[uint64]struct{}
 	resultMetric map[string]labels.Labels
+
+	// Additional options for the evaluation.
+	enableDelayedNameRemoval bool
 }

 func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) {
@@ -1161,7 +1177,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 			biggestLen = len(matrixes[i])
 		}
 	}
-	enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)}
+	enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
 	type seriesAndTimestamp struct {
 		Series
 		ts int64
@@ -1207,12 +1223,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 		for si, series := range matrixes[i] {
 			switch {
 			case len(series.Floats) > 0 && series.Floats[0].T == ts:
-				vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts})
+				vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName})
 				// Move input vectors forward so we don't have to re-scan the same
 				// past points at the next step.
 				matrixes[i][si].Floats = series.Floats[1:]
 			case len(series.Histograms) > 0 && series.Histograms[0].T == ts:
-				vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts})
+				vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName})
 				matrixes[i][si].Histograms = series.Histograms[1:]
 			default:
 				continue
@@ -1251,15 +1267,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)

 	// If this could be an instant query, shortcut so as not to change sort order.
 	if ev.endTimestamp == ev.startTimestamp {
-		if result.ContainsSameLabelset() {
+		if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() {
 			ev.errorf("vector cannot contain metrics with the same labelset")
 		}
 		mat := make(Matrix, len(result))
 		for i, s := range result {
 			if s.H == nil {
-				mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}}
+				mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName}
 			} else {
-				mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}}
+				mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName}
 			}
 		}
 		ev.currentSamples = originalNumSamples + mat.TotalSamples()
@@ -1277,7 +1293,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
 			}
 			ss.ts = ts
 		} else {
-			ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts}
+			ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts}
 		}
 		addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps)
 		seriess[h] = ss
@@ -1313,7 +1329,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping

 	var warnings annotations.Annotations

-	enh := &EvalNodeHelper{}
+	enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
 	tempNumSamples := ev.currentSamples

 	// Create a mapping from input series to output groups.
@@ -1622,10 +1638,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		var prevSS *Series
 		inMatrix := make(Matrix, 1)
 		inArgs[matrixArgIndex] = inMatrix
-		enh := &EvalNodeHelper{Out: make(Vector, 0, 1)}
+		enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
 		// Process all the calls for one time series at a time.
 		it := storage.NewBuffer(selRange)
 		var chkIter chunkenc.Iterator

+		// The last_over_time function acts like offset; thus, it
+		// should keep the metric name. For all the other range
+		// vector functions, the only change needed is to drop the
+		// metric name in the output.
+		dropName := e.Func.Name != "last_over_time"
+
 		for i, s := range selVS.Series {
 			if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
 				ev.error(err)
@@ -1640,15 +1663,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			chkIter = s.Iterator(chkIter)
 			it.Reset(chkIter)
 			metric := selVS.Series[i].Labels()
-			// The last_over_time function acts like offset; thus, it
-			// should keep the metric name. For all the other range
-			// vector functions, the only change needed is to drop the
-			// metric name in the output.
-			if e.Func.Name != "last_over_time" {
+			if !ev.enableDelayedNameRemoval && dropName {
 				metric = metric.DropMetricName()
 			}
 			ss := Series{
-				Metric: metric,
+				Metric:   metric,
+				DropName: dropName,
 			}
 			inMatrix[0].Metric = selVS.Series[i].Labels()
 			for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval {
@@ -1763,16 +1783,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio

 			return Matrix{
 				Series{
-					Metric: createLabelsForAbsentFunction(e.Args[0]),
-					Floats: newp,
+					Metric:   createLabelsForAbsentFunction(e.Args[0]),
+					Floats:   newp,
+					DropName: dropName,
 				},
 			}, warnings
 		}

-		if mat.ContainsSameLabelset() {
+		if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
 			ev.errorf("vector cannot contain metrics with the same labelset")
 		}

 		return mat, warnings

 	case *parser.ParenExpr:
@@ -1783,12 +1803,15 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 		mat := val.(Matrix)
 		if e.Op == parser.SUB {
 			for i := range mat {
-				mat[i].Metric = mat[i].Metric.DropMetricName()
+				if !ev.enableDelayedNameRemoval {
+					mat[i].Metric = mat[i].Metric.DropMetricName()
+				}
+				mat[i].DropName = true
 				for j := range mat[i].Floats {
 					mat[i].Floats[j].F = -mat[i].Floats[j].F
 				}
 			}
-			if mat.ContainsSameLabelset() {
+			if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
 				ev.errorf("vector cannot contain metrics with the same labelset")
 			}
 		}
@@ -1924,6 +1947,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			lookbackDelta:            ev.lookbackDelta,
 			samplesStats:             ev.samplesStats.NewChild(),
 			noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
 		}

 		if e.Step != 0 {
@@ -1968,6 +1992,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
 			lookbackDelta:            ev.lookbackDelta,
 			samplesStats:             ev.samplesStats.NewChild(),
 			noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
+			enableDelayedNameRemoval: ev.enableDelayedNameRemoval,
 		}
 		res, ws := newEv.eval(e.Expr)
 		ev.currentSamples = newEv.currentSamples
@@ -2564,7 +2589,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 			continue
 		}
 		metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
-		if returnBool {
+		if !ev.enableDelayedNameRemoval && returnBool {
 			metric = metric.DropMetricName()
 		}
 		insertedSigs, exists := matchedSigs[sig]
@@ -2589,9 +2614,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
 		}

 		enh.Out = append(enh.Out, Sample{
-			Metric: metric,
-			F:      floatValue,
-			H:      histogramValue,
+			Metric:   metric,
+			F:        floatValue,
+			H:        histogramValue,
+			DropName: returnBool,
 		})
 	}
 	return enh.Out, lastErr
@@ -2691,7 +2717,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
 		lhsSample.F = float
 		lhsSample.H = histogram
 		if shouldDropMetricName(op) || returnBool {
-			lhsSample.Metric = lhsSample.Metric.DropMetricName()
+			if !ev.enableDelayedNameRemoval {
+				lhsSample.Metric = lhsSample.Metric.DropMetricName()
+			}
+			lhsSample.DropName = true
 		}
 		enh.Out = append(enh.Out, lhsSample)
 	}
@@ -3030,6 +3059,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix

 		ss := &outputMatrix[ri]
 		addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps)
+		ss.DropName = inputMatrix[ri].DropName
 	}

 	return annos
@@ -3056,7 +3086,7 @@ seriesLoop:
 		if !ok {
 			continue
 		}
-		s = Sample{Metric: inputMatrix[si].Metric, F: f}
+		s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName}

 		group := &groups[seriesToResult[si]]
 		// Initialize this group if it's the first time we've seen it.
@@ -3140,16 +3170,16 @@ seriesLoop:
 		mat = make(Matrix, 0, len(groups))
 	}

-	add := func(lbls labels.Labels, f float64) {
+	add := func(lbls labels.Labels, f float64, dropName bool) {
 		// If this could be an instant query, add directly to the matrix so the result is in consistent order.
 		if ev.endTimestamp == ev.startTimestamp {
-			mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}})
+			mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName})
 		} else {
 			// Otherwise the results are added into seriess elements.
 			hash := lbls.Hash()
 			ss, ok := seriess[hash]
 			if !ok {
-				ss = Series{Metric: lbls}
+				ss = Series{Metric: lbls, DropName: dropName}
 			}
 			addToSeries(&ss, enh.Ts, f, nil, numSteps)
 			seriess[hash] = ss
@@ -3166,7 +3196,7 @@ seriesLoop:
 				sort.Sort(sort.Reverse(aggr.heap))
 			}
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}

 		case parser.BOTTOMK:
@@ -3175,12 +3205,12 @@ seriesLoop:
 				sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap)))
 			}
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}

 		case parser.LIMITK, parser.LIMIT_RATIO:
 			for _, v := range aggr.heap {
-				add(v.Metric, v.F)
+				add(v.Metric, v.F, v.DropName)
 			}
 		}
 	}
@@ -3232,6 +3262,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []
 	return enh.Out, nil
 }

+func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
+	if v.Type() == parser.ValueTypeMatrix {
+		mat := v.(Matrix)
+		for i := range mat {
+			if mat[i].DropName {
+				mat[i].Metric = mat[i].Metric.DropMetricName()
+			}
+		}
+		if mat.ContainsSameLabelset() {
+			ev.errorf("vector cannot contain metrics with the same labelset")
+		}
+	} else if v.Type() == parser.ValueTypeVector {
+		vec := v.(Vector)
+		for i := range vec {
+			if vec[i].DropName {
+				vec[i].Metric = vec[i].Metric.DropMetricName()
+			}
+		}
+		if vec.ContainsSameLabelset() {
+			ev.errorf("vector cannot contain metrics with the same labelset")
+		}
+	}
+}
+
 func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) {
 	if h == nil {
 		if ss.Floats == nil {
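For embedders of the PromQL engine, the new behavior is opt-in via EngineOpts. A minimal construction sketch (the limit values below are illustrative, not taken from this commit):

    import (
        "time"

        "github.com/prometheus/prometheus/promql"
    )

    engine := promql.NewEngine(promql.EngineOpts{
        MaxSamples: 50000000,        // illustrative limit
        Timeout:    2 * time.Minute, // illustrative timeout
        // Always on for regular PromQL as of Prometheus v2.33 (see main.go above).
        EnableAtModifier:     true,
        EnableNegativeOffset: true,
        // New in this change: defer __name__ removal to the end of evaluation.
        EnableDelayedNameRemoval: true,
    })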
promql/engine_test.go
@@ -17,7 +17,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math"
 	"sort"
 	"strconv"
 	"sync"
@@ -1708,7 +1707,8 @@ load 1ms
 						{F: 3600, T: 6 * 60 * 1000},
 						{F: 3600, T: 7 * 60 * 1000},
 					},
-					Metric: labels.EmptyLabels(),
+					Metric:   labels.EmptyLabels(),
+					DropName: true,
 				},
 			},
 		},
@@ -1924,20 +1924,24 @@ func TestSubquerySelector(t *testing.T) {
 				nil,
 				promql.Matrix{
 					promql.Series{
-						Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
-						Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
+						Floats:   []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}},
+						Metric:   labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"),
+						DropName: true,
 					},
 					promql.Series{
-						Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
-						Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
+						Floats:   []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}},
+						Metric:   labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"),
+						DropName: true,
 					},
 					promql.Series{
-						Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
-						Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
+						Floats:   []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}},
+						Metric:   labels.FromStrings("job", "api-server", "instance", "0", "group", "production"),
+						DropName: true,
 					},
 					promql.Series{
-						Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
-						Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
+						Floats:   []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}},
+						Metric:   labels.FromStrings("job", "api-server", "instance", "1", "group", "production"),
+						DropName: true,
 					},
 				},
 				nil,
@@ -3326,171 +3330,6 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
 	}
 }

-func TestNativeHistogram_MulDivOperator(t *testing.T) {
-	// TODO(codesome): Integrate histograms into the PromQL testing framework
-	// and write more tests there.
-	originalHistogram := histogram.Histogram{
-		Schema:        0,
-		Count:         21,
-		Sum:           33,
-		ZeroThreshold: 0.001,
-		ZeroCount:     3,
-		PositiveSpans: []histogram.Span{
-			{Offset: 0, Length: 3},
-		},
-		PositiveBuckets: []int64{3, 0, 0},
-		NegativeSpans: []histogram.Span{
-			{Offset: 0, Length: 3},
-		},
-		NegativeBuckets: []int64{3, 0, 0},
-	}
-
-	cases := []struct {
-		scalar      float64
-		histogram   histogram.Histogram
-		expectedMul histogram.FloatHistogram
-		expectedDiv histogram.FloatHistogram
-	}{
-		{
-			scalar:    3,
-			histogram: originalHistogram,
-			expectedMul: histogram.FloatHistogram{
-				Schema:        0,
-				Count:         63,
-				Sum:           99,
-				ZeroThreshold: 0.001,
-				ZeroCount:     9,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				PositiveBuckets: []float64{9, 9, 9},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				NegativeBuckets: []float64{9, 9, 9},
-			},
-			expectedDiv: histogram.FloatHistogram{
-				Schema:        0,
-				Count:         7,
-				Sum:           11,
-				ZeroThreshold: 0.001,
-				ZeroCount:     1,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				PositiveBuckets: []float64{1, 1, 1},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				NegativeBuckets: []float64{1, 1, 1},
-			},
-		},
-		{
-			scalar:    0,
-			histogram: originalHistogram,
-			expectedMul: histogram.FloatHistogram{
-				Schema:        0,
-				Count:         0,
-				Sum:           0,
-				ZeroThreshold: 0.001,
-				ZeroCount:     0,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				PositiveBuckets: []float64{0, 0, 0},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				NegativeBuckets: []float64{0, 0, 0},
-			},
-			expectedDiv: histogram.FloatHistogram{
-				Schema:        0,
-				Count:         math.Inf(1),
-				Sum:           math.Inf(1),
-				ZeroThreshold: 0.001,
-				ZeroCount:     math.Inf(1),
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 3},
-				},
-				NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)},
-			},
-		},
-	}
-
-	idx0 := int64(0)
-	for _, c := range cases {
-		for _, floatHisto := range []bool{true, false} {
-			t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) {
-				storage := teststorage.New(t)
-				t.Cleanup(func() { storage.Close() })
-
-				seriesName := "sparse_histogram_series"
-				floatSeriesName := "float_series"
-
-				engine := newTestEngine(t)
-
-				ts := idx0 * int64(10*time.Minute/time.Millisecond)
-				app := storage.Appender(context.Background())
-				h := c.histogram
-				lbls := labels.FromStrings("__name__", seriesName)
-				// Since we mutate h later, we need to create a copy here.
-				var err error
-				if floatHisto {
-					_, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil))
-				} else {
-					_, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil)
-				}
-				require.NoError(t, err)
-				_, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar)
-				require.NoError(t, err)
-				require.NoError(t, app.Commit())
-
-				queryAndCheck := func(queryString string, exp promql.Vector) {
-					qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
-					require.NoError(t, err)
-
-					res := qry.Exec(context.Background())
-					require.NoError(t, res.Err)
-
-					vector, err := res.Vector()
-					require.NoError(t, err)
-
-					testutil.RequireEqual(t, exp, vector)
-				}
-
-				// histogram * scalar.
-				queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-				// scalar * histogram.
-				queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-				// histogram * float.
-				queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-				// float * histogram.
-				queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
-				// histogram / scalar.
-				queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-
-				// histogram / float.
-				queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
-				queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-			})
-			idx0++
-		}
-	}
-}
-
 func TestQueryLookbackDelta(t *testing.T) {
 	var (
 		load = `load 5m
promql/functions.go
@@ -483,9 +483,13 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 		return enh.Out, nil
 	}
 	for _, el := range vec {
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      math.Max(minVal, math.Min(maxVal, el.F)),
+			Metric:   el.Metric,
+			F:        math.Max(minVal, math.Min(maxVal, el.F)),
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -496,9 +500,13 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 	vec := vals[0].(Vector)
 	maxVal := vals[1].(Vector)[0].F
 	for _, el := range vec {
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      math.Min(maxVal, el.F),
+			Metric:   el.Metric,
+			F:        math.Min(maxVal, el.F),
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -509,9 +517,13 @@ func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 	vec := vals[0].(Vector)
 	minVal := vals[1].(Vector)[0].F
 	for _, el := range vec {
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      math.Max(minVal, el.F),
+			Metric:   el.Metric,
+			F:        math.Max(minVal, el.F),
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -532,8 +544,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 	for _, el := range vec {
 		f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      f,
+			Metric:   el.Metric,
+			F:        f,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -882,9 +895,13 @@ func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *Eval
 func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
 	for _, el := range vals[0].(Vector) {
 		if el.H == nil { // Process only float samples.
+			if !enh.enableDelayedNameRemoval {
+				el.Metric = el.Metric.DropMetricName()
+			}
 			enh.Out = append(enh.Out, Sample{
-				Metric: el.Metric.DropMetricName(),
-				F:      f(el.F),
+				Metric:   el.Metric,
+				F:        f(el.F),
+				DropName: true,
 			})
 		}
 	}
@@ -1028,9 +1045,13 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
 func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
 	vec := vals[0].(Vector)
 	for _, el := range vec {
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      float64(el.T) / 1000,
+			Metric:   el.Metric,
+			F:        float64(el.T) / 1000,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1137,9 +1158,13 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
 		if sample.H == nil {
 			continue
 		}
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Count,
+			Metric:   sample.Metric,
+			F:        sample.H.Count,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1154,9 +1179,13 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		if sample.H == nil {
 			continue
 		}
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Sum,
+			Metric:   sample.Metric,
+			F:        sample.H.Sum,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1171,9 +1200,13 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		if sample.H == nil {
 			continue
 		}
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      sample.H.Sum / sample.H.Count,
+			Metric:   sample.Metric,
+			F:        sample.H.Sum / sample.H.Count,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1210,9 +1243,13 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
 		}
 		variance += cVariance
 		variance /= sample.H.Count
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      math.Sqrt(variance),
+			Metric:   sample.Metric,
+			F:        math.Sqrt(variance),
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1249,9 +1286,13 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
 		}
 		variance += cVariance
 		variance /= sample.H.Count
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      variance,
+			Metric:   sample.Metric,
+			F:        variance,
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1268,9 +1309,13 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
 		if sample.H == nil {
 			continue
 		}
+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      histogramFraction(lower, upper, sample.H),
+			Metric:   sample.Metric,
+			F:        histogramFraction(lower, upper, sample.H),
+			DropName: true,
 		})
 	}
 	return enh.Out, nil
@@ -1338,9 +1383,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 			continue
 		}

+		if !enh.enableDelayedNameRemoval {
+			sample.Metric = sample.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: sample.Metric.DropMetricName(),
-			F:      histogramQuantile(q, sample.H),
+			Metric:   sample.Metric,
+			F:        histogramQuantile(q, sample.H),
+			DropName: true,
 		})
 	}

@@ -1442,6 +1491,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an
 			lb.Reset(el.Metric)
 			lb.Set(dst, string(res))
 			matrix[i].Metric = lb.Labels()
+			if dst == model.MetricNameLabel {
+				matrix[i].DropName = false
+			} else {
+				matrix[i].DropName = el.DropName
+			}
 		}
 	}
 	if matrix.ContainsSameLabelset() {
@@ -1496,6 +1550,12 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
 		lb.Reset(el.Metric)
 		lb.Set(dst, strval)
 		matrix[i].Metric = lb.Labels()
+
+		if dst == model.MetricNameLabel {
+			matrix[i].DropName = false
+		} else {
+			matrix[i].DropName = el.DropName
+		}
 	}

 	return matrix, ws
@@ -1518,9 +1578,13 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo

 	for _, el := range vals[0].(Vector) {
 		t := time.Unix(int64(el.F), 0).UTC()
+		if !enh.enableDelayedNameRemoval {
+			el.Metric = el.Metric.DropMetricName()
+		}
 		enh.Out = append(enh.Out, Sample{
-			Metric: el.Metric.DropMetricName(),
-			F:      f(t),
+			Metric:   el.Metric,
+			F:        f(t),
+			DropName: true,
 		})
 	}
 	return enh.Out
promql/parser/lex.go
@@ -617,6 +617,16 @@ func lexBuckets(l *Lexer) stateFn {
 		l.bracketOpen = false
 		l.emit(RIGHT_BRACKET)
 		return lexHistogram
+	case isAlpha(r):
+		// Current word is Inf or NaN.
+		word := l.input[l.start:l.pos]
+		if desc, ok := key[strings.ToLower(word)]; ok {
+			if desc == NUMBER {
+				l.emit(desc)
+				return lexStatements
+			}
+		}
+		return lexBuckets
 	default:
 		return l.errorf("invalid character in buckets description: %q", r)
 	}
promql/parser/lex_test.go
@@ -639,6 +639,29 @@ var tests = []struct {
 		},
 		seriesDesc: true,
 	},
+	{
+		input: `{} {{buckets: [Inf NaN] schema:1}}`,
+		expected: []Item{
+			{LEFT_BRACE, 0, `{`},
+			{RIGHT_BRACE, 1, `}`},
+			{SPACE, 2, ` `},
+			{OPEN_HIST, 3, `{{`},
+			{BUCKETS_DESC, 5, `buckets`},
+			{COLON, 12, `:`},
+			{SPACE, 13, ` `},
+			{LEFT_BRACKET, 14, `[`},
+			{NUMBER, 15, `Inf`},
+			{SPACE, 18, ` `},
+			{NUMBER, 19, `NaN`},
+			{RIGHT_BRACKET, 22, `]`},
+			{SPACE, 23, ` `},
+			{SCHEMA_DESC, 24, `schema`},
+			{COLON, 30, `:`},
+			{NUMBER, 31, `1`},
+			{CLOSE_HIST, 32, `}}`},
+		},
+		seriesDesc: true,
+	},
 	{ // Series with sum as -Inf and count as NaN.
 		input: `{} {{buckets: [5 10 7] sum:Inf count:NaN}}`,
 		expected: []Item{
promql/promqltest/test.go
@@ -91,6 +91,7 @@ func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Du
 		EnableNegativeOffset:     true,
 		EnablePerStepStats:       enablePerStepStats,
 		LookbackDelta:            lookbackDelta,
+		EnableDelayedNameRemoval: true,
 	})
 }

@@ -1373,6 +1374,7 @@ func (ll *LazyLoader) clear() error {
 		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) },
 		EnableAtModifier:         ll.opts.EnableAtModifier,
 		EnableNegativeOffset:     ll.opts.EnableNegativeOffset,
+		EnableDelayedNameRemoval: true,
 	}

 	ll.queryEngine = promql.NewEngine(opts)
promql/promqltest/testdata/name_label_dropping.test (vendored, new file) | 84 additions
@@ -0,0 +1,84 @@
+# Test for __name__ label drop.
+load 5m
+	metric{env="1"}	0 60 120
+	another_metric{env="1"}	60 120 180
+
+# Does not drop __name__ for vector selector
+eval instant at 15m metric{env="1"}
+	metric{env="1"} 120
+
+# Drops __name__ for unary operators
+eval instant at 15m -metric
+	{env="1"} -120
+
+# Drops __name__ for binary operators
+eval instant at 15m metric + another_metric
+	{env="1"} 300
+
+# Does not drop __name__ for binary comparison operators
+eval instant at 15m metric <= another_metric
+	metric{env="1"} 120
+
+# Drops __name__ for binary comparison operators with "bool" modifier
+eval instant at 15m metric <= bool another_metric
+	{env="1"} 1
+
+# Drops __name__ for vector-scalar operations
+eval instant at 15m metric * 2
+	{env="1"} 240
+
+# Drops __name__ for instant-vector functions
+eval instant at 15m clamp(metric, 0, 100)
+	{env="1"} 100
+
+# Drops __name__ for range-vector functions
+eval instant at 15m rate(metric{env="1"}[10m])
+	{env="1"} 0.2
+
+# Does not drop __name__ for last_over_time function
+eval instant at 15m last_over_time(metric{env="1"}[10m])
+	metric{env="1"} 120
+
+# Drops name for other _over_time functions
+eval instant at 15m max_over_time(metric{env="1"}[10m])
+	{env="1"} 120
+
+# Allows relabeling (to-be-dropped) __name__ via label_replace
+eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)")
+	{my_name="rate_metric", env="1"} 0.2
+	{my_name="rate_another_metric", env="1"} 0.2
+
+# Allows preserving __name__ via label_replace
+eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)")
+	rate_metric{env="1"} 0.2
+	rate_another_metric{env="1"} 0.2
+
+# Allows relabeling (to-be-dropped) __name__ via label_join
+eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__")
+	{my_name="metric", env="1"} 0.2
+	{my_name="another_metric", env="1"} 0.2
+
+# Allows preserving __name__ via label_join
+eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env")
+	metric_1{env="1"} 0.2
+	another_metric_1{env="1"} 0.2
+
+# Does not drop metric names for aggregation operators
+eval instant at 15m sum by (__name__, env) (metric{env="1"})
+	metric{env="1"} 120
+
+# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label)
+# This is an accidental side effect of delayed __name__ label dropping
+eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m]))
+
+# Aggregation operators aggregate metrics with same labelset and to-be-dropped names
+# This is an accidental side effect of delayed __name__ label dropping
+eval instant at 15m sum(rate({env="1"}[10m])) by (env)
+	{env="1"} 0.4
+
+# Aggregation operators propagate __name__ label dropping information
+eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"}))
+	metric{env="1"} 120
+
+eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m])))
+	{env="1"} 0.2
promql/promqltest/testdata/native_histograms.test
@@ -718,6 +718,52 @@ eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
 eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
 	{} 100

+# Apply multiplication and division operator to histogram.
+load 10m
+	histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1
+	float_series_3 3+0x1
+	float_series_0 0+0x1
+
+eval instant at 10m histogram_mul_div*3
+	{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
+
+eval instant at 10m 3*histogram_mul_div
+	{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
+
+eval instant at 10m histogram_mul_div*float_series_3
+	{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
+
+eval instant at 10m float_series_3*histogram_mul_div
+	{} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}}
+
+eval instant at 10m histogram_mul_div/3
+	{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
+
+eval instant at 10m histogram_mul_div/float_series_3
+	{} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}}
+
+eval instant at 10m histogram_mul_div*0
+	{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
+
+eval instant at 10m 0*histogram_mul_div
+	{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
+
+eval instant at 10m histogram_mul_div*float_series_0
+	{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
+
+eval instant at 10m float_series_0*histogram_mul_div
+	{} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}
+
+# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934
+eval instant at 10m histogram_mul_div/0
+	{} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}}
+
+eval instant at 10m histogram_mul_div/float_series_0
+	{} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}}
+
+eval instant at 10m histogram_mul_div*0/0
+	{} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}}
+
 clear

 # Counter reset only noticeable in a single bucket.
@ -68,6 +68,9 @@ type Series struct {
|
|||
Metric labels.Labels `json:"metric"`
|
||||
Floats []FPoint `json:"values,omitempty"`
|
||||
Histograms []HPoint `json:"histograms,omitempty"`
|
||||
// DropName is used to indicate whether the __name__ label should be dropped
|
||||
// as part of the query evaluation.
|
||||
DropName bool `json:"-"`
|
||||
}
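A minimal consumer-side sketch of the new DropName flag, assuming the promql package context: the helper name below is invented here, while labels.Labels and its DropMetricName method come from the Prometheus labels package.

// Hypothetical helper, not part of this patch: resolve the labels a
// query result should expose, honoring DropName.
func effectiveLabels(s Series) labels.Labels {
	if s.DropName {
		// DropMetricName returns a copy of the labels without __name__.
		return s.Metric.DropMetricName()
	}
	return s.Metric
}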
|
||||
|
||||
func (s Series) String() string {
|
||||
|
@ -194,6 +197,9 @@ type Sample struct {
|
|||
H *histogram.FloatHistogram
|
||||
|
||||
Metric labels.Labels
|
||||
// DropName is used to indicate whether the __name__ label should be dropped
|
||||
// as part of the query evaluation.
|
||||
DropName bool
|
||||
}
|
||||
|
||||
func (s Sample) String() string {
|
||||
|
|
|
@ -724,8 +724,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
|
|||
name string
|
||||
counterSample *dto.Counter
|
||||
enableCTZeroIngestion bool
|
||||
|
||||
expectedValues []float64
|
||||
}{
|
||||
{
|
||||
name: "disabled with CT on counter",
|
||||
|
@ -734,7 +732,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
|
|||
// Timestamp does not matter as long as it exists in this test.
|
||||
CreatedTimestamp: timestamppb.Now(),
|
||||
},
|
||||
expectedValues: []float64{1.0},
|
||||
},
|
||||
{
|
||||
name: "enabled with CT on counter",
|
||||
|
@ -744,7 +741,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
|
|||
CreatedTimestamp: timestamppb.Now(),
|
||||
},
|
||||
enableCTZeroIngestion: true,
|
||||
expectedValues: []float64{0.0, 1.0},
|
||||
},
|
||||
{
|
||||
name: "enabled without CT on counter",
|
||||
|
@ -752,7 +748,6 @@ func TestManagerCTZeroIngestion(t *testing.T) {
|
|||
Value: proto.Float64(1.0),
|
||||
},
|
||||
enableCTZeroIngestion: true,
|
||||
expectedValues: []float64{1.0},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
@ -819,46 +814,44 @@ func TestManagerCTZeroIngestion(t *testing.T) {
|
|||
})
|
||||
scrapeManager.reload()
|
||||
|
||||
var got []float64
|
||||
// Wait for one scrape.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
defer cancel()
|
||||
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
|
||||
if countFloatSamples(app, mName) != len(tc.expectedValues) {
|
||||
return fmt.Errorf("expected %v samples", tc.expectedValues)
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
// Check if the scrape happened and grab the relevant samples; they have to be there - or it's a bug
|
||||
// and it's not worth waiting.
|
||||
for _, f := range app.resultFloats {
|
||||
if f.metric.Get(model.MetricNameLabel) == mName {
|
||||
got = append(got, f.f)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
if len(app.resultFloats) > 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("expected some samples, got none")
|
||||
}), "after 1 minute")
|
||||
scrapeManager.Stop()
|
||||
|
||||
require.Equal(t, tc.expectedValues, getResultFloats(app, mName))
|
||||
// Check for zero samples, assuming we always injected only one sample.
|
||||
// Did it contain CT to inject? If yes, was CT zero enabled?
|
||||
if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion {
|
||||
require.Len(t, got, 2)
|
||||
require.Equal(t, 0.0, got[0])
|
||||
require.Equal(t, tc.counterSample.GetValue(), got[1])
|
||||
return
|
||||
}
|
||||
|
||||
// Expect only one, valid sample.
|
||||
require.Len(t, got, 1)
|
||||
require.Equal(t, tc.counterSample.GetValue(), got[0])
|
||||
})
|
||||
}
|
||||
}
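The gist of CT-zero ingestion that the cases above encode, as a hedged sketch rather than the scrape loop's actual code (the production path goes through a dedicated created-timestamp appender; the helper below and its plain Append calls are illustrative):

// appendWithCTZero is a hypothetical stand-in: when a counter sample
// carries a created timestamp (ct) and the feature is enabled, append a
// synthetic zero at ct before the real value, yielding {0.0, v}.
func appendWithCTZero(app storage.Appender, ls labels.Labels, ct, t int64, v float64, enabled bool) error {
	if enabled && ct != 0 {
		if _, err := app.Append(0, ls, ct, 0); err != nil {
			return err
		}
	}
	_, err := app.Append(0, ls, t, v)
	return err
}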
|
||||
|
||||
func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
|
||||
for _, f := range a.resultFloats {
|
||||
if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
for _, f := range app.resultFloats {
|
||||
if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
|
||||
result = append(result, f.f)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func TestUnregisterMetrics(t *testing.T) {
|
||||
reg := prometheus.NewRegistry()
|
||||
// Check that all metrics can be unregistered, allowing a second manager to be created.
|
||||
|
@ -1185,7 +1178,7 @@ scrape_configs:
|
|||
)
|
||||
}
|
||||
|
||||
// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when aone of them should no,
|
||||
// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
|
||||
// longer discover targets, only the stale targets of that provider are dropped.
|
||||
func TestOnlyStaleTargetsAreDropped(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
|
|
@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
|
|||
fi
|
||||
|
||||
# List of files that should be synced.
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml"
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml .github/workflows/stale.yml"
|
||||
|
||||
# Go to the root of the repo
|
||||
cd "$(git rev-parse --show-cdup)" || exit 1
|
||||
|
|
|
@ -24,7 +24,6 @@ import (
|
|||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
|
@ -594,5 +593,5 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
|
|||
|
||||
// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms
|
||||
func convertTimeStamp(timestamp pcommon.Timestamp) int64 {
|
||||
return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))
|
||||
return int64(timestamp) / 1_000_000
|
||||
}
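The rewrite drops the time.Time round-trip but keeps the arithmetic: a pcommon.Timestamp is Unix nanoseconds held in a uint64, so both bodies divide the same nanosecond count by 1e6. A quick illustrative check (sample value invented):

ts := pcommon.Timestamp(1_700_000_000_123_456_789)
oldMs := ts.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))
newMs := int64(ts) / 1_000_000
fmt.Println(oldMs == newMs, newMs) // true 1700000000123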
|
||||
|
|
|
@ -10,13 +10,21 @@
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/debbf30360b8d3a0ded8db09c4419d2a9c99b94a/pkg/translator/prometheusremotewrite/helper_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheusremotewrite
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.opentelemetry.io/collector/pdata/pcommon"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
)
|
||||
|
@ -159,3 +167,239 @@ func TestCreateAttributes(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_convertTimeStamp(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
arg pcommon.Timestamp
|
||||
want int64
|
||||
}{
|
||||
{"zero", 0, 0},
|
||||
{"1ms", 1_000_000, 1},
|
||||
{"1s", pcommon.Timestamp(time.Unix(1, 0).UnixNano()), 1000},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := convertTimeStamp(tt.arg)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
|
||||
ts := pcommon.Timestamp(time.Now().UnixNano())
|
||||
tests := []struct {
|
||||
name string
|
||||
metric func() pmetric.Metric
|
||||
want func() map[uint64]*prompb.TimeSeries
|
||||
}{
|
||||
{
|
||||
name: "summary with start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_summary")
|
||||
metric.SetEmptySummary()
|
||||
|
||||
dp := metric.Summary().DataPoints().AppendEmpty()
|
||||
dp.SetTimestamp(ts)
|
||||
dp.SetStartTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_summary" + countStr},
|
||||
}
|
||||
createdLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix},
|
||||
}
|
||||
sumLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_summary" + sumStr},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(sumLabels): {
|
||||
Labels: sumLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(createdLabels): {
|
||||
Labels: createdLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "summary without start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_summary")
|
||||
metric.SetEmptySummary()
|
||||
|
||||
dp := metric.Summary().DataPoints().AppendEmpty()
|
||||
dp.SetTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_summary" + countStr},
|
||||
}
|
||||
sumLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_summary" + sumStr},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(sumLabels): {
|
||||
Labels: sumLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
metric := tt.metric()
|
||||
converter := NewPrometheusConverter()
|
||||
|
||||
converter.addSummaryDataPoints(
|
||||
metric.Summary().DataPoints(),
|
||||
pcommon.NewResource(),
|
||||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
metric.Name(),
|
||||
)
|
||||
|
||||
assert.Equal(t, tt.want(), converter.unique)
|
||||
assert.Empty(t, converter.conflicts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
|
||||
ts := pcommon.Timestamp(time.Now().UnixNano())
|
||||
tests := []struct {
|
||||
name string
|
||||
metric func() pmetric.Metric
|
||||
want func() map[uint64]*prompb.TimeSeries
|
||||
}{
|
||||
{
|
||||
name: "histogram with start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_hist")
|
||||
metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
|
||||
pt := metric.Histogram().DataPoints().AppendEmpty()
|
||||
pt.SetTimestamp(ts)
|
||||
pt.SetStartTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist" + countStr},
|
||||
}
|
||||
createdLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist" + createdSuffix},
|
||||
}
|
||||
infLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist_bucket"},
|
||||
{Name: model.BucketLabel, Value: "+Inf"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(infLabels): {
|
||||
Labels: infLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(createdLabels): {
|
||||
Labels: createdLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "histogram without start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_hist")
|
||||
metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
|
||||
pt := metric.Histogram().DataPoints().AppendEmpty()
|
||||
pt.SetTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist" + countStr},
|
||||
}
|
||||
infLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist_bucket"},
|
||||
{Name: model.BucketLabel, Value: "+Inf"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(infLabels): {
|
||||
Labels: infLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
metric := tt.metric()
|
||||
converter := NewPrometheusConverter()
|
||||
|
||||
converter.addHistogramDataPoints(
|
||||
metric.Histogram().DataPoints(),
|
||||
pcommon.NewResource(),
|
||||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
metric.Name(),
|
||||
)
|
||||
|
||||
assert.Equal(t, tt.want(), converter.unique)
|
||||
assert.Empty(t, converter.conflicts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,771 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/histograms_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheusremotewrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/collector/pdata/pcommon"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
|
||||
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
|
||||
)
|
||||
|
||||
type expectedBucketLayout struct {
|
||||
wantSpans []prompb.BucketSpan
|
||||
wantDeltas []int64
|
||||
}
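For reading the cases below: native histogram buckets are delta-encoded, so each wantDeltas slice decodes to absolute counts via a running sum (e.g. deltas 4, -1, -1, -1 are counts 4, 3, 2, 1). A small helper, illustrative rather than part of the test file:

// deltasToCounts undoes the delta encoding used by prompb histograms.
func deltasToCounts(deltas []int64) []int64 {
	counts := make([]int64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d
		counts[i] = cur
	}
	return counts
}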
|
||||
|
||||
func TestConvertBucketsLayout(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
buckets func() pmetric.ExponentialHistogramDataPointBuckets
|
||||
wantLayout map[int32]expectedBucketLayout
|
||||
}{
|
||||
{
|
||||
name: "zero offset",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(0)
|
||||
b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 4,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{4, -1, -1, -1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 2,
|
||||
},
|
||||
},
|
||||
// Downscale: 4+3, 2+1 = 7, 3; as deltas: 7, -4
|
||||
wantDeltas: []int64{7, -4},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
// Downscale: 4+3+2+1 = 10; as deltas: 10
|
||||
wantDeltas: []int64{10},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "offset 1",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(1)
|
||||
b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 2,
|
||||
Length: 4,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{4, -1, -1, -1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{4, 1, -4}, // 0+4, 3+2, 1+0 = 4, 5, 1
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 2,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{9, -8}, // 0+4+3+2, 1+0+0+0 = 9, 1
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "positive offset",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(4)
|
||||
b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 5,
|
||||
Length: 4,
|
||||
},
|
||||
{
|
||||
Offset: 12,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{4, -2, -2, 2, -1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 3,
|
||||
Length: 2,
|
||||
},
|
||||
{
|
||||
Offset: 6,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 0, 0, 1
|
||||
wantDeltas: []int64{6, -4, -1},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 2,
|
||||
Length: 1,
|
||||
},
|
||||
{
|
||||
Offset: 3,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1
|
||||
// Cross-check by scaling from the previous level: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
|
||||
wantDeltas: []int64{8, -7},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "scaledown merges spans",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(4)
|
||||
b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 5,
|
||||
Length: 4,
|
||||
},
|
||||
{
|
||||
Offset: 8,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{4, -2, -2, 2, -1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 3,
|
||||
Length: 2,
|
||||
},
|
||||
{
|
||||
Offset: 4,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 1
|
||||
wantDeltas: []int64{6, -4, -1},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 2,
|
||||
Length: 4,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1
|
||||
// Cross-check by scaling from the previous level: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
|
||||
wantDeltas: []int64{8, -8, 0, 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "negative offset",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(-2)
|
||||
b.BucketCounts().FromRaw([]uint64{3, 1, 0, 0, 0, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: -1,
|
||||
Length: 2,
|
||||
},
|
||||
{
|
||||
Offset: 3,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{3, -2, 0},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 3+1, 0+0, 0+1 = 4, 0, 1
|
||||
wantDeltas: []int64{4, -4, 1},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 2,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 0+0+3+1, 0+0+0+0 = 4, 1
|
||||
wantDeltas: []int64{4, -3},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "buckets with gaps of size 1",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(-2)
|
||||
b.BucketCounts().FromRaw([]uint64{3, 1, 0, 1, 0, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: -1,
|
||||
Length: 6,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{3, -2, -1, 1, -1, 1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 3+1, 0+1, 0+1 = 4, 1, 1
|
||||
wantDeltas: []int64{4, -3, 0},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 2,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 0+0+3+1, 0+1+0+1 = 4, 2
|
||||
wantDeltas: []int64{4, -2},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "buckets with gaps of size 2",
|
||||
buckets: func() pmetric.ExponentialHistogramDataPointBuckets {
|
||||
b := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
b.SetOffset(-2)
|
||||
b.BucketCounts().FromRaw([]uint64{3, 0, 0, 1, 0, 0, 1})
|
||||
return b
|
||||
},
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: -1,
|
||||
Length: 7,
|
||||
},
|
||||
},
|
||||
wantDeltas: []int64{3, -3, 0, 1, -1, 0, 1},
|
||||
},
|
||||
1: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 4,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 3+0, 0+1, 0+0, 0+1 = 3, 1, 0, 1
|
||||
wantDeltas: []int64{3, -2, -1, 1},
|
||||
},
|
||||
2: {
|
||||
wantSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
// Downscale:
|
||||
// 0+0+3+0, 0+1+0+0, 1+0+0+0 = 3, 1, 1
|
||||
wantDeltas: []int64{3, -2, 0},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "zero buckets",
|
||||
buckets: pmetric.NewExponentialHistogramDataPointBuckets,
|
||||
wantLayout: map[int32]expectedBucketLayout{
|
||||
0: {
|
||||
wantSpans: nil,
|
||||
wantDeltas: nil,
|
||||
},
|
||||
1: {
|
||||
wantSpans: nil,
|
||||
wantDeltas: nil,
|
||||
},
|
||||
2: {
|
||||
wantSpans: nil,
|
||||
wantDeltas: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for scaleDown, wantLayout := range tt.wantLayout {
|
||||
t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) {
|
||||
gotSpans, gotDeltas := convertBucketsLayout(tt.buckets(), scaleDown)
|
||||
assert.Equal(t, wantLayout.wantSpans, gotSpans)
|
||||
assert.Equal(t, wantLayout.wantDeltas, gotDeltas)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
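A sketch of the downscaling idea the table above exercises, ignoring span/offset boundary alignment for brevity (the real convertBucketsLayout aligns merge boundaries to the absolute bucket index, which is why the "offset 1" case pairs an implicit empty bucket with the first count); the helper name is illustrative:

// downscale collapses every 2^scaleDown adjacent buckets into one.
func downscale(counts []uint64, scaleDown int32) []uint64 {
	step := 1 << scaleDown
	var out []uint64
	for i := 0; i < len(counts); i += step {
		var sum uint64
		for j := i; j < i+step && j < len(counts); j++ {
			sum += counts[j]
		}
		out = append(out, sum)
	}
	return out
}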
|
||||
|
||||
func BenchmarkConvertBucketLayout(b *testing.B) {
|
||||
scenarios := []struct {
|
||||
gap int
|
||||
}{
|
||||
{gap: 0},
|
||||
{gap: 1},
|
||||
{gap: 2},
|
||||
{gap: 3},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
buckets := pmetric.NewExponentialHistogramDataPointBuckets()
|
||||
buckets.SetOffset(0)
|
||||
for i := 0; i < 1000; i++ {
|
||||
if i%(scenario.gap+1) == 0 {
|
||||
buckets.BucketCounts().Append(10)
|
||||
} else {
|
||||
buckets.BucketCounts().Append(0)
|
||||
}
|
||||
}
|
||||
b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
convertBucketsLayout(buckets, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExponentialToNativeHistogram(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
exponentialHist func() pmetric.ExponentialHistogramDataPoint
|
||||
wantNativeHist func() prompb.Histogram
|
||||
wantErrMessage string
|
||||
}{
|
||||
{
|
||||
name: "convert exp. to native histogram",
|
||||
exponentialHist: func() pmetric.ExponentialHistogramDataPoint {
|
||||
pt := pmetric.NewExponentialHistogramDataPoint()
|
||||
pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100)))
|
||||
pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500)))
|
||||
pt.SetCount(4)
|
||||
pt.SetSum(10.1)
|
||||
pt.SetScale(1)
|
||||
pt.SetZeroCount(1)
|
||||
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{1, 1})
|
||||
pt.Positive().SetOffset(1)
|
||||
|
||||
pt.Negative().BucketCounts().FromRaw([]uint64{1, 1})
|
||||
pt.Negative().SetOffset(1)
|
||||
|
||||
return pt
|
||||
},
|
||||
wantNativeHist: func() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 4},
|
||||
Sum: 10.1,
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1},
|
||||
NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}},
|
||||
NegativeDeltas: []int64{1, 0},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}},
|
||||
PositiveDeltas: []int64{1, 0},
|
||||
Timestamp: 500,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "convert exp. to native histogram with no sum",
|
||||
exponentialHist: func() pmetric.ExponentialHistogramDataPoint {
|
||||
pt := pmetric.NewExponentialHistogramDataPoint()
|
||||
pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100)))
|
||||
pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500)))
|
||||
|
||||
pt.SetCount(4)
|
||||
pt.SetScale(1)
|
||||
pt.SetZeroCount(1)
|
||||
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{1, 1})
|
||||
pt.Positive().SetOffset(1)
|
||||
|
||||
pt.Negative().BucketCounts().FromRaw([]uint64{1, 1})
|
||||
pt.Negative().SetOffset(1)
|
||||
|
||||
return pt
|
||||
},
|
||||
wantNativeHist: func() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 4},
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1},
|
||||
NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}},
|
||||
NegativeDeltas: []int64{1, 0},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}},
|
||||
PositiveDeltas: []int64{1, 0},
|
||||
Timestamp: 500,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid negative scale",
|
||||
exponentialHist: func() pmetric.ExponentialHistogramDataPoint {
|
||||
pt := pmetric.NewExponentialHistogramDataPoint()
|
||||
pt.SetScale(-10)
|
||||
return pt
|
||||
},
|
||||
wantErrMessage: "cannot convert exponential to native histogram." +
|
||||
" Scale must be >= -4, was -10",
|
||||
},
|
||||
{
|
||||
name: "no downscaling at scale 8",
|
||||
exponentialHist: func() pmetric.ExponentialHistogramDataPoint {
|
||||
pt := pmetric.NewExponentialHistogramDataPoint()
|
||||
pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500)))
|
||||
pt.SetCount(6)
|
||||
pt.SetSum(10.1)
|
||||
pt.SetScale(8)
|
||||
pt.SetZeroCount(1)
|
||||
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1})
|
||||
pt.Positive().SetOffset(1)
|
||||
|
||||
pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1})
|
||||
pt.Negative().SetOffset(2)
|
||||
return pt
|
||||
},
|
||||
wantNativeHist: func() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 6},
|
||||
Sum: 10.1,
|
||||
Schema: 8,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 3}},
|
||||
PositiveDeltas: []int64{1, 0, 0}, // 1, 1, 1
|
||||
NegativeSpans: []prompb.BucketSpan{{Offset: 3, Length: 3}},
|
||||
NegativeDeltas: []int64{1, 0, 0}, // 1, 1, 1
|
||||
Timestamp: 500,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "downsample if scale is more than 8",
|
||||
exponentialHist: func() pmetric.ExponentialHistogramDataPoint {
|
||||
pt := pmetric.NewExponentialHistogramDataPoint()
|
||||
pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500)))
|
||||
pt.SetCount(6)
|
||||
pt.SetSum(10.1)
|
||||
pt.SetScale(9)
|
||||
pt.SetZeroCount(1)
|
||||
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1})
|
||||
pt.Positive().SetOffset(1)
|
||||
|
||||
pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1})
|
||||
pt.Negative().SetOffset(2)
|
||||
return pt
|
||||
},
|
||||
wantNativeHist: func() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 6},
|
||||
Sum: 10.1,
|
||||
Schema: 8,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 1, Length: 2}},
|
||||
PositiveDeltas: []int64{1, 1}, // 0+1, 1+1 = 1, 2
|
||||
NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}},
|
||||
NegativeDeltas: []int64{2, -1}, // 1+1, 1+0 = 2, 1
|
||||
Timestamp: 500,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check.
|
||||
got, annots, err := exponentialToNativeHistogram(tt.exponentialHist())
|
||||
if tt.wantErrMessage != "" {
|
||||
assert.ErrorContains(t, err, tt.wantErrMessage)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, annots)
|
||||
assert.Equal(t, tt.wantNativeHist(), got)
|
||||
validateNativeHistogramCount(t, got)
|
||||
})
|
||||
}
|
||||
}
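The scale handling the cases above pin down, condensed into a hedged sketch (the helper name is invented; the bounds and error text come from the tests): native histogram schemas span [-4, 8], so lower scales are rejected and higher ones are downscaled by the excess.

func clampScale(scale int32) (schema, scaleDown int32, err error) {
	if scale < -4 {
		return 0, 0, fmt.Errorf("cannot convert exponential to native histogram. Scale must be >= -4, was %d", scale)
	}
	if scale > 8 {
		// E.g. scale 9 becomes schema 8 with buckets merged pairwise.
		return 8, scale - 8, nil
	}
	return scale, 0, nil
}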
|
||||
|
||||
func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistogramDataPoint) {
|
||||
actualCount := uint64(0)
|
||||
for _, bucket := range h.Positive().BucketCounts().AsRaw() {
|
||||
actualCount += bucket
|
||||
}
|
||||
for _, bucket := range h.Negative().BucketCounts().AsRaw() {
|
||||
actualCount += bucket
|
||||
}
|
||||
require.Equal(t, h.Count(), actualCount, "exponential histogram count mismatch")
|
||||
}
|
||||
|
||||
func validateNativeHistogramCount(t *testing.T, h prompb.Histogram) {
|
||||
require.NotNil(t, h.Count)
|
||||
require.IsType(t, &prompb.Histogram_CountInt{}, h.Count)
|
||||
want := h.Count.(*prompb.Histogram_CountInt).CountInt
|
||||
var (
|
||||
actualCount uint64
|
||||
prevBucket int64
|
||||
)
|
||||
for _, delta := range h.PositiveDeltas {
|
||||
prevBucket += delta
|
||||
actualCount += uint64(prevBucket)
|
||||
}
|
||||
prevBucket = 0
|
||||
for _, delta := range h.NegativeDeltas {
|
||||
prevBucket += delta
|
||||
actualCount += uint64(prevBucket)
|
||||
}
|
||||
assert.Equal(t, want, actualCount, "native histogram count mismatch")
|
||||
}
|
||||
|
||||
func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
metric func() pmetric.Metric
|
||||
wantSeries func() map[uint64]*prompb.TimeSeries
|
||||
}{
|
||||
{
|
||||
name: "histogram data points with same labels",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_hist")
|
||||
metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
|
||||
pt := metric.ExponentialHistogram().DataPoints().AppendEmpty()
|
||||
pt.SetCount(7)
|
||||
pt.SetScale(1)
|
||||
pt.Positive().SetOffset(-1)
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{4, 2})
|
||||
pt.Exemplars().AppendEmpty().SetDoubleValue(1)
|
||||
pt.Attributes().PutStr("attr", "test_attr")
|
||||
|
||||
pt = metric.ExponentialHistogram().DataPoints().AppendEmpty()
|
||||
pt.SetCount(4)
|
||||
pt.SetScale(1)
|
||||
pt.Positive().SetOffset(-1)
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{4, 2, 1})
|
||||
pt.Exemplars().AppendEmpty().SetDoubleValue(2)
|
||||
pt.Attributes().PutStr("attr", "test_attr")
|
||||
|
||||
return metric
|
||||
},
|
||||
wantSeries: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist"},
|
||||
{Name: "attr", Value: "test_attr"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Histograms: []prompb.Histogram{
|
||||
{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 7},
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}},
|
||||
PositiveDeltas: []int64{4, -2},
|
||||
},
|
||||
{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 4},
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}},
|
||||
PositiveDeltas: []int64{4, -2, -1},
|
||||
},
|
||||
},
|
||||
Exemplars: []prompb.Exemplar{
|
||||
{Value: 1},
|
||||
{Value: 2},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "histogram data points with different labels",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_hist")
|
||||
metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
|
||||
pt := metric.ExponentialHistogram().DataPoints().AppendEmpty()
|
||||
pt.SetCount(7)
|
||||
pt.SetScale(1)
|
||||
pt.Positive().SetOffset(-1)
|
||||
pt.Positive().BucketCounts().FromRaw([]uint64{4, 2})
|
||||
pt.Exemplars().AppendEmpty().SetDoubleValue(1)
|
||||
pt.Attributes().PutStr("attr", "test_attr")
|
||||
|
||||
pt = metric.ExponentialHistogram().DataPoints().AppendEmpty()
|
||||
pt.SetCount(4)
|
||||
pt.SetScale(1)
|
||||
pt.Negative().SetOffset(-1)
|
||||
pt.Negative().BucketCounts().FromRaw([]uint64{4, 2, 1})
|
||||
pt.Exemplars().AppendEmpty().SetDoubleValue(2)
|
||||
pt.Attributes().PutStr("attr", "test_attr_two")
|
||||
|
||||
return metric
|
||||
},
|
||||
wantSeries: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist"},
|
||||
{Name: "attr", Value: "test_attr"},
|
||||
}
|
||||
labelsAnother := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_hist"},
|
||||
{Name: "attr", Value: "test_attr_two"},
|
||||
}
|
||||
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Histograms: []prompb.Histogram{
|
||||
{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 7},
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
|
||||
PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}},
|
||||
PositiveDeltas: []int64{4, -2},
|
||||
},
|
||||
},
|
||||
Exemplars: []prompb.Exemplar{
|
||||
{Value: 1},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(labelsAnother): {
|
||||
Labels: labelsAnother,
|
||||
Histograms: []prompb.Histogram{
|
||||
{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 4},
|
||||
Schema: 1,
|
||||
ZeroThreshold: defaultZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
|
||||
NegativeSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}},
|
||||
NegativeDeltas: []int64{4, -2, -1},
|
||||
},
|
||||
},
|
||||
Exemplars: []prompb.Exemplar{
|
||||
{Value: 2},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
metric := tt.metric()
|
||||
|
||||
converter := NewPrometheusConverter()
|
||||
annots, err := converter.addExponentialHistogramDataPoints(
|
||||
metric.ExponentialHistogram().DataPoints(),
|
||||
pcommon.NewResource(),
|
||||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
prometheustranslator.BuildCompliantName(metric, "", true),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, annots)
|
||||
|
||||
assert.Equal(t, tt.wantSeries(), converter.unique)
|
||||
assert.Empty(t, converter.conflicts)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,258 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/number_data_points_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheusremotewrite
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.opentelemetry.io/collector/pdata/pcommon"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
)
|
||||
|
||||
func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
|
||||
ts := uint64(time.Now().UnixNano())
|
||||
tests := []struct {
|
||||
name string
|
||||
metric func() pmetric.Metric
|
||||
want func() map[uint64]*prompb.TimeSeries
|
||||
}{
|
||||
{
|
||||
name: "gauge",
|
||||
metric: func() pmetric.Metric {
|
||||
return getIntGaugeMetric(
|
||||
"test",
|
||||
pcommon.NewMap(),
|
||||
1, ts,
|
||||
)
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{
|
||||
Value: 1,
|
||||
Timestamp: convertTimeStamp(pcommon.Timestamp(ts)),
|
||||
}},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
metric := tt.metric()
|
||||
converter := NewPrometheusConverter()
|
||||
|
||||
converter.addGaugeNumberDataPoints(
|
||||
metric.Gauge().DataPoints(),
|
||||
pcommon.NewResource(),
|
||||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
metric.Name(),
|
||||
)
|
||||
|
||||
assert.Equal(t, tt.want(), converter.unique)
|
||||
assert.Empty(t, converter.conflicts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
|
||||
ts := pcommon.Timestamp(time.Now().UnixNano())
|
||||
tests := []struct {
|
||||
name string
|
||||
metric func() pmetric.Metric
|
||||
want func() map[uint64]*prompb.TimeSeries
|
||||
}{
|
||||
{
|
||||
name: "sum",
|
||||
metric: func() pmetric.Metric {
|
||||
return getIntSumMetric(
|
||||
"test",
|
||||
pcommon.NewMap(),
|
||||
1,
|
||||
uint64(ts.AsTime().UnixNano()),
|
||||
)
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{
|
||||
Value: 1,
|
||||
Timestamp: convertTimeStamp(ts),
|
||||
}},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sum with exemplars",
|
||||
metric: func() pmetric.Metric {
|
||||
m := getIntSumMetric(
|
||||
"test",
|
||||
pcommon.NewMap(),
|
||||
1,
|
||||
uint64(ts.AsTime().UnixNano()),
|
||||
)
|
||||
m.Sum().DataPoints().At(0).Exemplars().AppendEmpty().SetDoubleValue(2)
|
||||
return m
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{{
|
||||
Value: 1,
|
||||
Timestamp: convertTimeStamp(ts),
|
||||
}},
|
||||
Exemplars: []prompb.Exemplar{
|
||||
{Value: 2},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "monotonic cumulative sum with start timestamp",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_sum")
|
||||
metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
metric.SetEmptySum().SetIsMonotonic(true)
|
||||
|
||||
dp := metric.Sum().DataPoints().AppendEmpty()
|
||||
dp.SetDoubleValue(1)
|
||||
dp.SetTimestamp(ts)
|
||||
dp.SetStartTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_sum"},
|
||||
}
|
||||
createdLabels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_sum" + createdSuffix},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 1, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
timeSeriesSignature(createdLabels): {
|
||||
Labels: createdLabels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "monotonic cumulative sum with no start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_sum")
|
||||
metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
metric.SetEmptySum().SetIsMonotonic(true)
|
||||
|
||||
dp := metric.Sum().DataPoints().AppendEmpty()
|
||||
dp.SetTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_sum"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "non-monotonic cumulative sum with start time",
|
||||
metric: func() pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName("test_sum")
|
||||
metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
metric.SetEmptySum().SetIsMonotonic(false)
|
||||
|
||||
dp := metric.Sum().DataPoints().AppendEmpty()
|
||||
dp.SetTimestamp(ts)
|
||||
|
||||
return metric
|
||||
},
|
||||
want: func() map[uint64]*prompb.TimeSeries {
|
||||
labels := []prompb.Label{
|
||||
{Name: model.MetricNameLabel, Value: "test_sum"},
|
||||
}
|
||||
return map[uint64]*prompb.TimeSeries{
|
||||
timeSeriesSignature(labels): {
|
||||
Labels: labels,
|
||||
Samples: []prompb.Sample{
|
||||
{Value: 0, Timestamp: convertTimeStamp(ts)},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
metric := tt.metric()
|
||||
converter := NewPrometheusConverter()
|
||||
|
||||
converter.addSumNumberDataPoints(
|
||||
metric.Sum().DataPoints(),
|
||||
pcommon.NewResource(),
|
||||
metric,
|
||||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
metric.Name(),
|
||||
)
|
||||
|
||||
assert.Equal(t, tt.want(), converter.unique)
|
||||
assert.Empty(t, converter.conflicts)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/testutil_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheusremotewrite
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/collector/pdata/pcommon"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
)
|
||||
|
||||
func getIntGaugeMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName(name)
|
||||
dp := metric.SetEmptyGauge().DataPoints().AppendEmpty()
|
||||
if strings.HasPrefix(name, "staleNaN") {
|
||||
dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))
|
||||
}
|
||||
dp.SetIntValue(value)
|
||||
attributes.CopyTo(dp.Attributes())
|
||||
|
||||
dp.SetStartTimestamp(pcommon.Timestamp(0))
|
||||
dp.SetTimestamp(pcommon.Timestamp(ts))
|
||||
return metric
|
||||
}
|
||||
|
||||
func getIntSumMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric {
|
||||
metric := pmetric.NewMetric()
|
||||
metric.SetName(name)
|
||||
metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||
dp := metric.Sum().DataPoints().AppendEmpty()
|
||||
if strings.HasPrefix(name, "staleNaN") {
|
||||
dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))
|
||||
}
|
||||
dp.SetIntValue(value)
|
||||
attributes.CopyTo(dp.Attributes())
|
||||
|
||||
dp.SetStartTimestamp(pcommon.Timestamp(0))
|
||||
dp.SetTimestamp(pcommon.Timestamp(ts))
|
||||
return metric
|
||||
}
|
|
@ -133,9 +133,6 @@ type Meta struct {
|
|||
// Time range the data covers.
|
||||
// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
|
||||
MinTime, MaxTime int64
|
||||
|
||||
// Flag to indicate that this meta needs merge with OOO data.
|
||||
MergeOOO bool
|
||||
}
|
||||
|
||||
// ChunkFromSamples requires all samples to have the same type.
|
||||
|
|
|
@ -366,7 +366,7 @@ func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Ch
|
|||
// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it.
|
||||
// Also returns max time of the chunk.
|
||||
func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
|
||||
sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
|
||||
sid, cid, isOOO := unpackHeadChunkRef(meta.Ref)
|
||||
|
||||
s := h.head.series.getByID(sid)
|
||||
// This means that the series has been garbage collected.
|
||||
|
@ -376,12 +376,21 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
|
|||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return h.chunkFromSeries(s, cid, copyLastChunk)
|
||||
return h.head.chunkFromSeries(s, cid, isOOO, h.mint, h.maxt, h.isoState, copyLastChunk)
|
||||
}
|
||||
|
||||
// Dumb thing to defeat chunk pool.
|
||||
type wrapOOOHeadChunk struct {
|
||||
chunkenc.Chunk
|
||||
}
|
||||
|
||||
// Call with s locked.
|
||||
func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
|
||||
c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
|
||||
func (h *Head) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, isOOO bool, mint, maxt int64, isoState *isolationState, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
|
||||
if isOOO {
|
||||
chk, maxTime, err := s.oooChunk(cid, h.chunkDiskMapper, &h.memChunkPool)
|
||||
return wrapOOOHeadChunk{chk}, maxTime, err
|
||||
}
|
||||
c, headChunk, isOpen, err := s.chunk(cid, h.chunkDiskMapper, &h.memChunkPool)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
@ -390,12 +399,12 @@ func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID,
|
|||
// Set this to nil so that Go GC can collect it after it has been used.
|
||||
c.chunk = nil
|
||||
c.prev = nil
|
||||
h.head.memChunkPool.Put(c)
|
||||
h.memChunkPool.Put(c)
|
||||
}
|
||||
}()
|
||||
|
||||
// This means that the chunk is outside the specified range.
|
||||
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
|
||||
if !c.OverlapsClosedInterval(mint, maxt) {
|
||||
return nil, 0, storage.ErrNotFound
|
||||
}
|
||||
|
||||
|
@ -407,7 +416,7 @@ func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID,
|
|||
newB := make([]byte, len(b))
|
||||
copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
|
||||
// TODO(codesome): Put back in the pool (non-trivial).
|
||||
chk, err = h.head.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB)
|
||||
chk, err = h.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
@ -417,7 +426,7 @@ func (h *headChunkReader) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID,
|
|||
Chunk: chk,
|
||||
s: s,
|
||||
cid: cid,
|
||||
isoState: h.isoState,
|
||||
isoState: isoState,
|
||||
}, maxTime, nil
|
||||
}
|
||||
|
||||
|
@ -481,85 +490,19 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
|
|||
return elem, true, offset == 0, nil
|
||||
}
|
||||
|
||||
// mergedChunks returns an iterable over all chunks that overlap the
|
||||
// time window [mint,maxt], plus meta.Chunk if populated.
|
||||
// If hr is non-nil then in-order chunks are included.
|
||||
// This function is not thread safe unless the caller holds a lock.
|
||||
// The caller must ensure that s.ooo is not nil.
|
||||
func (s *memSeries) mergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, hr *headChunkReader, mint, maxt int64, maxMmapRef chunks.ChunkDiskMapperRef) (chunkenc.Iterable, error) {
|
||||
// We create a temporary slice of chunk metas to hold the information of all
|
||||
// possible chunks that may overlap with the requested chunk.
|
||||
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
|
||||
// oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk.
|
||||
// It never returns the head OOO chunk.
|
||||
func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) {
|
||||
// ix represents the index of the chunk in the s.ooo.oooMmappedChunks slice. The chunk IDs are
|
||||
// incremented by 1 when a new chunk is created, hence (id - firstOOOChunkID) gives the slice index.
|
||||
ix := int(id) - int(s.ooo.firstOOOChunkID)
|
||||
|
||||
for i, c := range s.ooo.oooMmappedChunks {
|
||||
if maxMmapRef != 0 && c.ref > maxMmapRef {
|
||||
break
|
||||
}
|
||||
if c.OverlapsClosedInterval(mint, maxt) {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
|
||||
meta: chunks.Meta{
|
||||
MinTime: c.minTime,
|
||||
MaxTime: c.maxTime,
|
||||
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))),
|
||||
},
|
||||
ref: c.ref,
|
||||
})
|
||||
}
|
||||
}
|
||||
// Add in data copied from the head OOO chunk.
|
||||
if meta.Chunk != nil {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
|
||||
if ix < 0 || ix >= len(s.ooo.oooMmappedChunks) {
|
||||
return nil, 0, storage.ErrNotFound
|
||||
}
|
||||
|
||||
if hr != nil { // Include in-order chunks.
|
||||
metas := appendSeriesChunks(s, max(meta.MinTime, mint), min(meta.MaxTime, maxt), nil)
|
||||
for _, m := range metas {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
|
||||
meta: m,
|
||||
ref: 0, // This tells the loop below it's an in-order head chunk.
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Next we want to sort all the collected chunks by min time so we can find
|
||||
// those that overlap and stop when we know the rest don't.
|
||||
slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)
|
||||
|
||||
mc := &mergedOOOChunks{}
|
||||
absoluteMax := int64(math.MinInt64)
|
||||
for _, c := range tmpChks {
|
||||
if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) {
|
||||
continue
|
||||
}
|
||||
var iterable chunkenc.Iterable
|
||||
switch {
|
||||
case c.meta.Chunk != nil:
|
||||
iterable = c.meta.Chunk
|
||||
case c.ref == 0: // This is an in-order head chunk.
|
||||
_, cid := chunks.HeadChunkRef(c.meta.Ref).Unpack()
|
||||
var err error
|
||||
iterable, _, err = hr.chunkFromSeries(s, cid, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid head chunk: %w", err)
|
||||
}
|
||||
default:
|
||||
chk, err := cdm.Chunk(c.ref)
|
||||
if err != nil {
|
||||
var cerr *chunks.CorruptionErr
|
||||
if errors.As(err, &cerr) {
|
||||
return nil, fmt.Errorf("invalid ooo mmapped chunk: %w", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
iterable = chk
|
||||
}
|
||||
mc.chunkIterables = append(mc.chunkIterables, iterable)
|
||||
if c.meta.MaxTime > absoluteMax {
|
||||
absoluteMax = c.meta.MaxTime
|
||||
}
|
||||
}
|
||||
|
||||
return mc, nil
|
||||
chk, err := chunkDiskMapper.Chunk(s.ooo.oooMmappedChunks[ix].ref)
|
||||
return chk, s.ooo.oooMmappedChunks[ix].maxTime, err
|
||||
}
|
||||
|
||||
// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
|
||||
|
|
|
@ -16,6 +16,7 @@ package tsdb
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
|
@ -91,11 +92,10 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap
|
|||
|
||||
addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
|
||||
tmpChks = append(tmpChks, chunks.Meta{
|
||||
MinTime: minT,
|
||||
MaxTime: maxT,
|
||||
Ref: ref,
|
||||
Chunk: chunk,
|
||||
MergeOOO: true,
|
||||
MinTime: minT,
|
||||
MaxTime: maxT,
|
||||
Ref: ref,
|
||||
Chunk: chunk,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -140,34 +140,39 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap
|
|||
// those that overlap.
|
||||
slices.SortFunc(tmpChks, lessByMinTimeAndMinRef)
|
||||
|
||||
// Next we want to iterate the sorted collected chunks and only return the
|
||||
// chunk Metas for the first chunk that overlaps with others.
|
||||
// Next we want to iterate the sorted collected chunks and return composites for chunks that overlap with others.
|
||||
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
|
||||
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
|
||||
// return chunk Metas for chunk 5 and chunk 6e
|
||||
*chks = append(*chks, tmpChks[0])
|
||||
maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
|
||||
// In the example 5 overlaps with 7 and 6 overlaps with 8 so we will return
|
||||
// [5,7], [6,8].
|
||||
toBeMerged := tmpChks[0]
|
||||
for _, c := range tmpChks[1:] {
|
||||
switch {
|
||||
case c.MinTime > maxTime:
|
||||
*chks = append(*chks, c)
|
||||
maxTime = c.MaxTime
|
||||
case c.MaxTime > maxTime:
|
||||
maxTime = c.MaxTime
|
||||
(*chks)[len(*chks)-1].MaxTime = c.MaxTime
|
||||
fallthrough
|
||||
default:
|
||||
// If the head OOO chunk is part of an output chunk, copy the chunk pointer.
|
||||
if c.Chunk != nil {
|
||||
(*chks)[len(*chks)-1].Chunk = c.Chunk
|
||||
if c.MinTime > toBeMerged.MaxTime {
|
||||
// This chunk doesn't overlap. Send current toBeMerged to output and start a new one.
|
||||
*chks = append(*chks, toBeMerged)
|
||||
toBeMerged = c
|
||||
} else {
|
||||
// Merge this chunk with existing toBeMerged.
|
||||
if mm, ok := toBeMerged.Chunk.(*multiMeta); ok {
|
||||
mm.metas = append(mm.metas, c)
|
||||
} else {
|
||||
toBeMerged.Chunk = &multiMeta{metas: []chunks.Meta{toBeMerged, c}}
|
||||
}
|
||||
if toBeMerged.MaxTime < c.MaxTime {
|
||||
toBeMerged.MaxTime = c.MaxTime
|
||||
}
|
||||
(*chks)[len(*chks)-1].MergeOOO = (*chks)[len(*chks)-1].MergeOOO || c.MergeOOO
|
||||
}
|
||||
}
|
||||
*chks = append(*chks, toBeMerged)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
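To see what the new toBeMerged grouping produces, here is a small standalone sketch (hypothetical names; plain intervals stand in for chunks.Meta) that merges the comment's example into the groups [5,7] and [6,8]:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type interval struct{ min, max int64 }

type group struct {
	max     int64
	members []interval
}

// mergeOverlapping folds intervals the way the loop above folds chunk metas:
// sort by min time, then add each interval to the current group while it
// starts at or before the group's running max time.
func mergeOverlapping(in []interval) []group {
	slices.SortFunc(in, func(a, b interval) int { return cmp.Compare(a.min, b.min) })
	var out []group
	for _, c := range in {
		if n := len(out); n > 0 && c.min <= out[n-1].max {
			g := &out[n-1]
			g.members = append(g.members, c)
			if c.max > g.max {
				g.max = c.max
			}
			continue
		}
		out = append(out, group{max: c.max, members: []interval{c}})
	}
	return out
}

func main() {
	// Chunks from the comment: 5:(100,200) 6:(500,600) 7:(150,250) 8:(550,650).
	for _, g := range mergeOverlapping([]interval{{100, 200}, {500, 600}, {150, 250}, {550, 650}}) {
		fmt.Println(g.members) // [{100 200} {150 250}], then [{500 600} {550 650}]
	}
}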
// Fake Chunk object to pass a set of Metas inside Meta.Chunk.
type multiMeta struct {
	chunkenc.Chunk // We don't expect any of the methods to be called.
	metas          []chunks.Meta
}
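Embedding the interface here is a standard Go idiom for a stub that satisfies chunkenc.Chunk without implementing it: the embedded value is nil, so any method call would panic, which is acceptable because only the concrete *multiMeta is ever inspected. A generic sketch of the idiom, with hypothetical names:

package example

import "fmt"

type Encoder interface {
	Encode([]byte) ([]byte, error)
}

// stubEncoder satisfies Encoder via the nil embedded interface; calling
// Encode on it panics, which loudly flags any accidental use.
type stubEncoder struct {
	Encoder
	note string
}

func describe(e Encoder) string {
	if s, ok := e.(stubEncoder); ok {
		return fmt.Sprintf("stub carrying %q", s.note)
	}
	return "real encoder"
}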

// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return labels within either the in-order range or the OOO range.
func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {

@@ -182,29 +187,6 @@ func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, m
	return labelValuesWithMatchers(ctx, oh, name, matchers...)
}
type chunkMetaAndChunkDiskMapperRef struct {
	meta chunks.Meta
	ref  chunks.ChunkDiskMapperRef
}

func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
	switch {
	case a.meta.MinTime < b.meta.MinTime:
		return -1
	case a.meta.MinTime > b.meta.MinTime:
		return 1
	}

	switch {
	case a.meta.Ref < b.meta.Ref:
		return -1
	case a.meta.Ref > b.meta.Ref:
		return 1
	default:
		return 0
	}
}
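The two-key comparison above can also be written with cmp.Compare (Go 1.21) and cmp.Or (Go 1.22) from the standard library; a standalone sketch with hypothetical types:

package example

import (
	"cmp"
	"slices"
)

type item struct {
	minTime int64
	ref     uint64
}

// sortByMinTimeThenRef mirrors refLessByMinTimeAndMinRef above: order by
// min time first, and break ties with the lower reference.
func sortByMinTimeThenRef(items []item) {
	slices.SortFunc(items, func(a, b item) int {
		return cmp.Or(cmp.Compare(a.minTime, b.minTime), cmp.Compare(a.ref, b.ref))
	})
}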

func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
	switch {
	case a.MinTime < b.MinTime:
@@ -243,36 +225,55 @@ func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader,
}

func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	sid, _, _ := unpackHeadChunkRef(meta.Ref)
	if !meta.MergeOOO {
		return cr.cr.ChunkOrIterable(meta)
	}

	s := cr.head.series.getByID(sid)
	// This means that the series has been garbage collected.
	if s == nil {
		return nil, nil, storage.ErrNotFound
	}

	s.Lock()
	if s.ooo == nil { // Must have s.ooo non-nil to call mergedChunks().
		s.Unlock()
		return cr.cr.ChunkOrIterable(meta)
	}
	mc, err := s.mergedChunks(meta, cr.head.chunkDiskMapper, cr.cr, cr.mint, cr.maxt, cr.maxMmapRef)
	s.Unlock()

	return nil, mc, err
	c, it, _, err := cr.chunkOrIterable(meta, false)
	return c, it, err
}
// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy
// behaviour is only implemented for the in-order head chunk.
func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	if !meta.MergeOOO {
		return cr.cr.ChunkOrIterableWithCopy(meta)
	return cr.chunkOrIterable(meta, true)
	}
func (cr *HeadAndOOOChunkReader) chunkOrIterable(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
	sid, cid, isOOO := unpackHeadChunkRef(meta.Ref)
	s := cr.head.series.getByID(sid)
	// This means that the series has been garbage collected.
	if s == nil {
		return nil, nil, 0, storage.ErrNotFound
	}
	chk, iter, err := cr.ChunkOrIterable(meta)
	return chk, iter, 0, err
	var isoState *isolationState
	if cr.cr != nil {
		isoState = cr.cr.isoState
	}

	s.Lock()
	defer s.Unlock()

	if meta.Chunk == nil {
		c, maxt, err := cr.head.chunkFromSeries(s, cid, isOOO, meta.MinTime, meta.MaxTime, isoState, copyLastChunk)
		return c, nil, maxt, err
	}
	mm, ok := meta.Chunk.(*multiMeta)
	if !ok { // Complete chunk was supplied.
		return meta.Chunk, nil, meta.MaxTime, nil
	}
	// We have a composite meta: construct a composite iterable.
	mc := &mergedOOOChunks{}
	for _, m := range mm.metas {
		switch {
		case m.Chunk != nil:
			mc.chunkIterables = append(mc.chunkIterables, m.Chunk)
		default:
			_, cid, isOOO := unpackHeadChunkRef(m.Ref)
			iterable, _, err := cr.head.chunkFromSeries(s, cid, isOOO, m.MinTime, m.MaxTime, isoState, copyLastChunk)
			if err != nil {
				return nil, nil, 0, fmt.Errorf("invalid head chunk: %w", err)
			}
			mc.chunkIterables = append(mc.chunkIterables, iterable)
		}
	}
	return nil, mc, meta.MaxTime, nil
}
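As a usage note, callers of ChunkOrIterable get exactly one of a complete chunk or an iterable composite; a minimal consumer sketch (hypothetical helper, assuming the chunkenc package's Iterator API):

package example

import "github.com/prometheus/prometheus/tsdb/chunkenc"

// countSamples drains whichever of chunk/iterable was returned; per the
// contract above, exactly one of c and it is non-nil on success.
func countSamples(c chunkenc.Chunk, it chunkenc.Iterable) (n int) {
	var iter chunkenc.Iterator
	if c != nil {
		iter = c.Iterator(nil) // Single complete chunk.
	} else {
		iter = it.Iterator(nil) // Composite of overlapping chunks.
	}
	for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() {
		n++
	}
	return n
}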

func (cr *HeadAndOOOChunkReader) Close() error {
@@ -39,6 +39,11 @@ type chunkInterval struct
	maxt int64
}

type expChunk struct {
	c chunkInterval
	m []chunkInterval
}

// permutateChunkIntervals returns all possible orders of the given chunkIntervals.
func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval {
	if left == right {
@@ -65,7 +70,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
		queryMinT           int64
		queryMaxT           int64
		inputChunkIntervals []chunkInterval
		expChunks           []chunkInterval
		expChunks           []expChunk
	}{
		{
			name: "Empty result and no error when head is empty",
@@ -107,8 +112,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
			// Query Interval [-----------------------------------------------------------]
			// Chunk 0: [---------------------------------------]
			expChunks: []chunkInterval{
				{0, 150, 350},
			expChunks: []expChunk{
				{c: chunkInterval{0, 150, 350}},
			},
		},
		{
@@ -121,8 +126,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700
			// Query Interval: [---------------------------------------]
			// Chunk 0: [-----------------------------------------------------------]
			expChunks: []chunkInterval{
				{0, 100, 400},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 400}},
			},
		},
		{
@@ -142,9 +147,9 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 2: [-------------------]
			// Chunk 3: [-------------------]
			// Output Graphically [-----------------------------] [-----------------------------]
			expChunks: []chunkInterval{
				{0, 100, 250},
				{1, 500, 650},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 250}, m: []chunkInterval{{0, 100, 200}, {2, 150, 250}}},
				{c: chunkInterval{1, 500, 650}, m: []chunkInterval{{1, 500, 600}, {3, 550, 650}}},
			},
		},
		{
@@ -164,8 +169,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 2: [-------------------]
			// Chunk 3: [------------------]
			// Output Graphically [------------------------------------------------------------------------------]
			expChunks: []chunkInterval{
				{0, 100, 500},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 500}, m: []chunkInterval{{0, 100, 200}, {1, 200, 300}, {2, 300, 400}, {3, 400, 500}}},
			},
		},
		{
@@ -185,11 +190,11 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 2: [------------------]
			// Chunk 3: [------------------]
			// Output Graphically [------------------][------------------][------------------][------------------]
			expChunks: []chunkInterval{
				{0, 100, 199},
				{1, 200, 299},
				{2, 300, 399},
				{3, 400, 499},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 199}},
				{c: chunkInterval{1, 200, 299}},
				{c: chunkInterval{2, 300, 399}},
				{c: chunkInterval{3, 400, 499}},
			},
		},
		{
@@ -209,8 +214,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 2: [------------------]
			// Chunk 3: [------------------]
			// Output Graphically [-----------------------------------------------]
			expChunks: []chunkInterval{
				{0, 100, 350},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 350}, m: []chunkInterval{{0, 100, 200}, {1, 150, 300}, {2, 250, 350}}},
			},
		},
		{
@@ -228,8 +233,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 1: [-----------------------------]
			// Chunk 2: [------------------------------]
			// Output Graphically [-----------------------------------------------------------------------------------------]
			expChunks: []chunkInterval{
				{1, 0, 500},
			expChunks: []expChunk{
				{c: chunkInterval{1, 0, 500}, m: []chunkInterval{{1, 0, 200}, {2, 150, 300}, {0, 250, 500}}},
			},
		},
		{
@@ -251,9 +256,9 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 3: [-------------------]
			// Chunk 4: [---------------------------------------]
			// Output Graphically [---------------------------------------] [------------------------------------------------]
			expChunks: []chunkInterval{
				{0, 100, 300},
				{4, 600, 850},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 300}, m: []chunkInterval{{0, 100, 300}, {2, 150, 250}}},
				{c: chunkInterval{4, 600, 850}, m: []chunkInterval{{4, 600, 800}, {3, 650, 750}, {1, 770, 850}}},
			},
		},
		{
@@ -271,10 +276,10 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
			// Chunk 1: [----------]
			// Chunk 2: [--------]
			// Output Graphically [-------] [--------] [----------]
			expChunks: []chunkInterval{
				{0, 100, 150},
				{1, 300, 350},
				{2, 200, 250},
			expChunks: []expChunk{
				{c: chunkInterval{0, 100, 150}},
				{c: chunkInterval{2, 200, 250}},
				{c: chunkInterval{1, 300, 350}},
			},
		},
	}
@@ -305,25 +310,38 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
		s1.ooo = &memSeriesOOOFields{}

		// define our expected chunks, by looking at the expected ChunkIntervals and setting...
		// Ref to whatever Ref the chunk has, that we refer to by ID
		findID := func(id int) chunks.ChunkRef {
			for ref, c := range intervals {
				if c.ID == id {
					return chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
				}
			}
			return 0
		}
		var expChunks []chunks.Meta
		for _, e := range tc.expChunks {
			meta := chunks.Meta{
				Chunk:    chunkenc.Chunk(nil),
				MinTime:  e.mint,
				MaxTime:  e.maxt,
				MergeOOO: true, // Only OOO chunks are tested here, so we always request merge from OOO head.
			}

			// Ref to whatever Ref the chunk has, that we refer to by ID
			for ref, c := range intervals {
				if c.ID == e.ID {
					meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref)))
					break
			var chunk chunkenc.Chunk
			if len(e.m) > 0 {
				mm := &multiMeta{}
				for _, x := range e.m {
					meta := chunks.Meta{
						MinTime: x.mint,
						MaxTime: x.maxt,
						Ref:     findID(x.ID),
					}
					mm.metas = append(mm.metas, meta)
				}
				chunk = mm
			}
			meta := chunks.Meta{
				Chunk:   chunk,
				MinTime: e.c.mint,
				MaxTime: e.c.maxt,
				Ref:     findID(e.c.ID),
			}
			expChunks = append(expChunks, meta)
		}
		slices.SortFunc(expChunks, lessByMinTimeAndMinRef) // We always want the chunks to come back sorted by minTime asc.

		if headChunk && len(intervals) > 0 {
			// Put the last interval in the head chunk
@@ -485,7 +503,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
		cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
		defer cr.Close()
		c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
			Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, MergeOOO: true,
			Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
		})
		require.Nil(t, iterable)
		require.Equal(t, err, fmt.Errorf("not found"))
@@ -498,6 +516,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
		queryMaxT            int64
		firstInOrderSampleAt int64
		inputSamples         []testValue
		expSingleChunks      bool
		expChunkError        bool
		expChunksSamples     []chunks.SampleSlice
	}{
@@ -510,7 +529,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
				{Ts: minutes(30), V: 0},
				{Ts: minutes(40), V: 0},
			},
			expChunkError: false,
			expChunkError:   false,
			expSingleChunks: true,
			// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
			// Query Interval [------------------------------------------------------------------------------------------]
			// Chunk 0: Current Head [--------] (With 2 samples)
@@ -690,7 +710,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
				{Ts: minutes(40), V: 3},
				{Ts: minutes(42), V: 3},
			},
			expChunkError: false,
			expChunkError:   false,
			expSingleChunks: true,
			// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
			// Query Interval [------------------------------------------------------------------------------------------]
			// Chunk 0 [-------]
@@ -845,9 +866,13 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
			for i := 0; i < len(chks); i++ {
				c, iterable, err := cr.ChunkOrIterable(chks[i])
				require.NoError(t, err)
				require.Nil(t, c)

				it := iterable.Iterator(nil)
				var it chunkenc.Iterator
				if tc.expSingleChunks {
					it = c.Iterator(nil)
				} else {
					require.Nil(t, c)
					it = iterable.Iterator(nil)
				}
				resultSamples, err := storage.ExpandSamples(it, nil)
				require.NoError(t, err)
				requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true)
@@ -1030,94 +1055,6 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
	}
}
// TestSortByMinTimeAndMinRef tests that the sort function for chunk metas
// sorts by chunk meta MinTime and, in case of the same MinTime, by the lower
// reference.
func TestSortByMinTimeAndMinRef(t *testing.T) {
	tests := []struct {
		name  string
		input []chunkMetaAndChunkDiskMapperRef
		exp   []chunkMetaAndChunkDiskMapperRef
	}{
		{
			name: "chunks are ordered by min time",
			input: []chunkMetaAndChunkDiskMapperRef{
				{
					meta: chunks.Meta{
						Ref:     0,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(0),
				},
				{
					meta: chunks.Meta{
						Ref:     1,
						MinTime: 1,
					},
					ref: chunks.ChunkDiskMapperRef(1),
				},
			},
			exp: []chunkMetaAndChunkDiskMapperRef{
				{
					meta: chunks.Meta{
						Ref:     0,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(0),
				},
				{
					meta: chunks.Meta{
						Ref:     1,
						MinTime: 1,
					},
					ref: chunks.ChunkDiskMapperRef(1),
				},
			},
		},
		{
			name: "if same mintime, lower reference goes first",
			input: []chunkMetaAndChunkDiskMapperRef{
				{
					meta: chunks.Meta{
						Ref:     10,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(0),
				},
				{
					meta: chunks.Meta{
						Ref:     5,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(1),
				},
			},
			exp: []chunkMetaAndChunkDiskMapperRef{
				{
					meta: chunks.Meta{
						Ref:     5,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(1),
				},
				{
					meta: chunks.Meta{
						Ref:     10,
						MinTime: 0,
					},
					ref: chunks.ChunkDiskMapperRef(0),
				},
			},
		},
	}

	for _, tc := range tests {
		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
			slices.SortFunc(tc.input, refLessByMinTimeAndMinRef)
			require.Equal(t, tc.exp, tc.input)
		})
	}
}

// TestSortMetaByMinTimeAndMinRef tests that the sort function for chunk metas
// sorts by chunk meta MinTime and, in case of the same MinTime, by the lower
// reference.
func TestSortMetaByMinTimeAndMinRef(t *testing.T) {
@@ -603,7 +603,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
		}

	case record.Metadata:
		if !w.sendMetadata || !tail {
		if !w.sendMetadata {
			break
		}
		meta, err := dec.Metadata(rec, metadata[:0])
@@ -22,10 +22,10 @@ import (
var minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.

// Equal returns true if a and b differ by less than their sum
// multiplied by epsilon.
// multiplied by epsilon, or if both are StaleNaN, or if both are any other NaN.
func Equal(a, b, epsilon float64) bool {
	// StaleNaN is a special value that is used as a staleness marker, so
	// the two values are equal when both are exactly equal to stale NaN.
	// StaleNaN is a special value that is used as a staleness marker, and
	// we don't want it to compare equal to any other NaN.
	if value.IsStaleNaN(a) || value.IsStaleNaN(b) {
		return value.IsStaleNaN(a) && value.IsStaleNaN(b)
	}
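For context, a standalone sketch of the comparison semantics the new doc comment describes (not the exact body, which this hunk truncates): StaleNaN equals only StaleNaN, any other NaN equals any other NaN, and finite values use a relative-epsilon test.

package example

import (
	"math"

	"github.com/prometheus/prometheus/model/value"
)

// almostEqual is a sketch of the documented behaviour, not the real Equal.
func almostEqual(a, b, epsilon float64) bool {
	// StaleNaN is a staleness marker: it compares equal only to itself.
	if value.IsStaleNaN(a) || value.IsStaleNaN(b) {
		return value.IsStaleNaN(a) && value.IsStaleNaN(b)
	}
	// Any other NaNs compare equal to each other.
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}
	if a == b {
		return true
	}
	// Relative comparison: the difference must be small next to |a|+|b|.
	return math.Abs(a-b)/(math.Abs(a)+math.Abs(b)) < epsilon
}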
50
util/almost/almost_test.go
Normal file

@@ -0,0 +1,50 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package almost

import (
	"fmt"
	"math"
	"testing"

	"github.com/prometheus/prometheus/model/value"
)

func TestEqual(t *testing.T) {
	staleNaN := math.Float64frombits(value.StaleNaN)
	tests := []struct {
		a       float64
		b       float64
		epsilon float64
		want    bool
	}{
		{0.0, 0.0, 0.0, true},
		{0.0, 0.1, 0.0, false},
		{1.0, 1.1, 0.1, true},
		{-1.0, -1.1, 0.1, true},
		{math.MaxFloat64, math.MaxFloat64 / 10, 0.1, false},
		{1.0, math.NaN(), 0.1, false},
		{math.NaN(), math.NaN(), 0.1, true},
		{math.NaN(), staleNaN, 0.1, false},
		{staleNaN, math.NaN(), 0.1, false},
		{staleNaN, staleNaN, 0.1, true},
	}
	for _, tt := range tests {
		t.Run(fmt.Sprintf("%v,%v,%v", tt.a, tt.b, tt.epsilon), func(t *testing.T) {
			if got := Equal(tt.a, tt.b, tt.epsilon); got != tt.want {
				t.Errorf("Equal(%v,%v,%v) = %v, want %v", tt.a, tt.b, tt.epsilon, got, tt.want)
			}
		})
	}
}