Mirror of https://github.com/prometheus/prometheus.git

commit 80adc5baf4
Merge remote-tracking branch 'origin/main' into merge-2.54-to-main
.github/workflows/ci.yml | 12 (vendored)

@@ -143,6 +143,18 @@ jobs:
       with:
         parallelism: 12
         thread: ${{ matrix.thread }}
+  build_all_status:
+    name: Report status of build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    needs: [build_all]
+    if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+    steps:
+      - name: Successful build
+        if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+        run: exit 0
+      - name: Failing or cancelled build
+        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        run: exit 1
   check_generated_parser:
     name: Check generated parser
     runs-on: ubuntu-latest
CHANGELOG.md | 10

@@ -1,11 +1,11 @@
 # Changelog

 ## unreleased

 * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200

 ## 2.54.0-rc.1 / 2024-08-05

 * [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584

 ## 2.54.0-rc.0 / 2024-07-19

 Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/).
 This is experimental at this time and may still change.
 Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`.

@@ -22,7 +22,7 @@ Remote-write v2 is enabled by default, but can be disabled via feature-flag `web
 * [ENHANCEMENT] TSDB: Optimise seek within index. #14393
 * [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307
 * [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286
-* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396
+* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584
 * [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368
 * [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173
 * [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156
@@ -12,9 +12,10 @@ examples and guides.</p>
 [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
 [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)

 </div>
SECURITY-INSIGHTS.yml | 48 (new file)

@@ -0,0 +1,48 @@
+header:
+  schema-version: '1.0.0'
+  expiration-date: '2025-07-30T01:00:00.000Z'
+  last-updated: '2024-07-30'
+  last-reviewed: '2024-07-30'
+  project-url: https://github.com/prometheus/prometheus
+  changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+  license: https://github.com/prometheus/prometheus/blob/main/LICENSE
+project-lifecycle:
+  status: active
+  bug-fixes-only: false
+  core-maintainers:
+  - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
+contribution-policy:
+  accepts-pull-requests: true
+  accepts-automated-pull-requests: true
+dependencies:
+  third-party-packages: true
+  dependencies-lists:
+  - https://github.com/prometheus/prometheus/blob/main/go.mod
+  - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
+  env-dependencies-policy:
+    policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
+distribution-points:
+- https://github.com/prometheus/prometheus/releases
+documentation:
+- https://prometheus.io/docs/introduction/overview/
+security-contacts:
+- type: email
+  value: prometheus-team@googlegroups.com
+security-testing:
+- tool-type: sca
+  tool-name: Dependabot
+  tool-version: latest
+  integration:
+    ad-hoc: false
+    ci: true
+    before-release: true
+- tool-type: sast
+  tool-name: CodeQL
+  tool-version: latest
+  integration:
+    ad-hoc: false
+    ci: true
+    before-release: true
+vulnerability-reporting:
+  accepts-vulnerability-reports: true
+  security-policy: https://github.com/prometheus/prometheus/security/policy
@@ -204,6 +204,7 @@ func main() {
 	pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()

 	testCmd := app.Command("test", "Unit testing.")
+	junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
 	testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
 	testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
 	testRulesFiles := testRulesCmd.Arg(

@@ -378,7 +379,11 @@ func main() {
 		os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))

 	case testRulesCmd.FullCommand():
-		os.Exit(RulesUnitTest(
+		results := io.Discard
+		if *junitOutFile != nil {
+			results = *junitOutFile
+		}
+		os.Exit(RulesUnitTestResult(results,
 			promqltest.LazyLoaderOpts{
 				EnableAtModifier:     true,
 				EnableNegativeOffset: true,
@@ -18,6 +18,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"path/filepath"
 	"sort"

@@ -29,9 +30,10 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/grafana/regexp"
 	"github.com/nsf/jsondiff"
-	"github.com/prometheus/common/model"
 	"gopkg.in/yaml.v2"

+	"github.com/prometheus/common/model"
+
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"

@@ -39,12 +41,18 @@ import (
 	"github.com/prometheus/prometheus/promql/promqltest"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/junitxml"
 )

 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
 func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+	return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+}
+
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
 	failed := false
+	junit := &junitxml.JUnitXML{}

 	var run *regexp.Regexp
 	if runStrings != nil {

@@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
 	}

 	for _, f := range files {
-		if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())

@@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
 		}
 		fmt.Println()
 	}
+	err := junit.WriteXML(results)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err)
+	}
 	if failed {
 		return failureExitCode
 	}
 	return successExitCode
 }

-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
-	fmt.Println("Unit Testing: ", filename)
-
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
 	b, err := os.ReadFile(filename)
 	if err != nil {
+		ts.Abort(err)
 		return []error{err}
 	}

 	var unitTestInp unitTestFile
 	if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil {
+		ts.Abort(err)
 		return []error{err}
 	}
 	if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil {
+		ts.Abort(err)
 		return []error{err}
 	}

@@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
 	}

 	evalInterval := time.Duration(unitTestInp.EvaluationInterval)

+	ts.Settime(time.Now().Format("2006-01-02T15:04:05"))
 	// Giving number for groups mentioned in the file for ordering.
 	// Lower number group should be evaluated before higher number group.
 	groupOrderMap := make(map[string]int)
 	for i, gn := range unitTestInp.GroupEvalOrder {
 		if _, ok := groupOrderMap[gn]; ok {
-			return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
+			err := fmt.Errorf("group name repeated in evaluation order: %s", gn)
+			ts.Abort(err)
+			return []error{err}
 		}
 		groupOrderMap[gn] = i
 	}

 	// Testing.
 	var errs []error
-	for _, t := range unitTestInp.Tests {
+	for i, t := range unitTestInp.Tests {
 		if !matchesRun(t.TestGroupName, run) {
 			continue
 		}

+		testname := t.TestGroupName
+		if testname == "" {
+			testname = fmt.Sprintf("unnamed#%d", i)
+		}
+		tc := ts.Case(testname)
 		if t.Interval == 0 {
 			t.Interval = unitTestInp.EvaluationInterval
 		}
 		ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
 		if ers != nil {
+			for _, e := range ers {
+				tc.Fail(e.Error())
+			}
 			errs = append(errs, ers...)
 		}
 	}
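The junitxml API used above is small: open one suite per test file, record a case per test group, mark failures, then serialize everything at the end. A minimal sketch of the call pattern, assuming the util/junitxml package behaves as the diff shows; the file and group names are hypothetical:

```
package main

import (
	"os"

	"github.com/prometheus/prometheus/util/junitxml"
)

func main() {
	junit := &junitxml.JUnitXML{}

	suite := junit.Suite("rules_test.yml")  // one suite per rules test file (hypothetical name)
	tc := suite.Case("alerts-group")        // one case per test group (hypothetical name)
	tc.Fail("expected alert did not fire")  // record a failure on that case

	// Serialize all suites as JUnit XML, e.g. for a CI system to ingest.
	if err := junit.WriteXML(os.Stdout); err != nil {
		os.Exit(1)
	}
}
```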
@@ -14,11 +14,15 @@
 package main

 import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
 	"testing"

 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/promql/promqltest"
+	"github.com/prometheus/prometheus/util/junitxml"
 )

 func TestRulesUnitTest(t *testing.T) {

@@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) {
 			want: 0,
 		},
 	}
+	reuseFiles := []string{}
+	reuseCount := [2]int{}
 	for _, tt := range tests {
+		if (tt.queryOpts == promqltest.LazyLoaderOpts{
+			EnableNegativeOffset: true,
+		} || tt.queryOpts == promqltest.LazyLoaderOpts{
+			EnableAtModifier: true,
+		}) {
+			reuseFiles = append(reuseFiles, tt.args.files...)
+			reuseCount[tt.want] += len(tt.args.files)
+		}
 		t.Run(tt.name, func(t *testing.T) {
 			if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
 				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
 			}
 		})
 	}
+	t.Run("Junit xml output ", func(t *testing.T) {
+		var buf bytes.Buffer
+		if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+			t.Errorf("RulesUnitTestResults() = %v, want 1", got)
+		}
+		var test junitxml.JUnitXML
+		output := buf.Bytes()
+		err := xml.Unmarshal(output, &test)
+		if err != nil {
+			fmt.Println("error in decoding XML:", err)
+			return
+		}
+		var total int
+		var passes int
+		var failures int
+		var cases int
+		total = len(test.Suites)
+		if total != len(reuseFiles) {
+			t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles))
+		}
+
+		for _, i := range test.Suites {
+			if i.FailureCount == 0 {
+				passes++
+			} else {
+				failures++
+			}
+			cases += len(i.Cases)
+		}
+		if total != passes+failures {
+			t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures)
+		}
+		if cases < total {
+			t.Errorf("JUnit output had %d suites without test cases\n", total-cases)
+		}
+	})
 }

 func TestRulesUnitTestRun(t *testing.T) {
@@ -37,6 +37,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/storage/remote/azuread"
+	"github.com/prometheus/prometheus/storage/remote/googleiam"
 )

 var (

@@ -227,6 +228,9 @@ var (
 	DefaultExemplarsConfig = ExemplarsConfig{
 		MaxExemplars: 100000,
 	}
+
+	// DefaultOTLPConfig is the default OTLP configuration.
+	DefaultOTLPConfig = OTLPConfig{}
 )

 // Config is the top-level configuration for Prometheus's config files.

@@ -242,6 +246,7 @@ type Config struct {

 	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
 	RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
+	OTLPConfig         OTLPConfig           `yaml:"otlp,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.

@@ -1120,6 +1125,7 @@ type RemoteWriteConfig struct {
 	MetadataConfig  MetadataConfig          `yaml:"metadata_config,omitempty"`
 	SigV4Config     *sigv4.SigV4Config      `yaml:"sigv4,omitempty"`
 	AzureADConfig   *azuread.AzureADConfig  `yaml:"azuread,omitempty"`
+	GoogleIAMConfig *googleiam.Config       `yaml:"google_iam,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.

@@ -1157,17 +1163,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}

-	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
-		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+	return validateAuthConfigs(c)
+}

-	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured.
+func validateAuthConfigs(c *RemoteWriteConfig) error {
+	var authConfigured []string
+	if c.HTTPClientConfig.BasicAuth != nil {
+		authConfigured = append(authConfigured, "basic_auth")
 	}

-	if c.SigV4Config != nil && c.AzureADConfig != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	if c.HTTPClientConfig.Authorization != nil {
+		authConfigured = append(authConfigured, "authorization")
 	}
+	if c.HTTPClientConfig.OAuth2 != nil {
+		authConfigured = append(authConfigured, "oauth2")
+	}
+	if c.SigV4Config != nil {
+		authConfigured = append(authConfigured, "sigv4")
+	}
+	if c.AzureADConfig != nil {
+		authConfigured = append(authConfigured, "azuread")
+	}
+	if c.GoogleIAMConfig != nil {
+		authConfigured = append(authConfigured, "google_iam")
+	}
+	if len(authConfigured) > 1 {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
+	}

 	return nil
 }
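The effect of the new validator is easiest to see from a config that sets two auth methods at once. A minimal sketch in the style of the config tests elsewhere in this diff, using the same LoadFile signature shown below; the fixture file name is hypothetical:

```
// Hypothetical test alongside config_test.go: a remote_write section
// that sets both `sigv4: {}` and `azuread` must now be rejected with
// the names of everything that was configured.
func TestRemoteWriteAuthMutualExclusion(t *testing.T) {
	_, err := LoadFile(filepath.Join("testdata", "remote_write_sigv4_azuread.bad.yml"), false, false, log.NewNopLogger())
	require.ErrorContains(t, err, "at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured")
}
```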
@@ -1186,7 +1208,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)

@@ -1305,3 +1327,35 @@ func getGoGCEnv() int {
 	}
 	return DefaultRuntimeConfig.GoGC
 }
+
+// OTLPConfig is the configuration for writing to the OTLP endpoint.
+type OTLPConfig struct {
+	PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultOTLPConfig
+	type plain OTLPConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+
+	seen := map[string]struct{}{}
+	var err error
+	for i, attr := range c.PromoteResourceAttributes {
+		attr = strings.TrimSpace(attr)
+		if attr == "" {
+			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+			continue
+		}
+		if _, exists := seen[attr]; exists {
+			err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr))
+			continue
+		}
+
+		seen[attr] = struct{}{}
+		c.PromoteResourceAttributes[i] = attr
+	}
+	return err
+}
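The sanitizer above trims whitespace, flags empty entries, and flags duplicates (comparing after trimming), joining all problems into one error. A standalone sketch that mirrors the same loop without importing the config package — simplified in that it also drops the offending entries rather than failing the whole config load:

```
package main

import (
	"errors"
	"fmt"
	"strings"
)

// sanitize mirrors the trim/dedupe loop in OTLPConfig.UnmarshalYAML above.
func sanitize(attrs []string) ([]string, error) {
	seen := map[string]struct{}{}
	var err error
	var out []string
	for _, attr := range attrs {
		attr = strings.TrimSpace(attr)
		if attr == "" {
			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
			continue
		}
		if _, exists := seen[attr]; exists {
			err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr))
			continue
		}
		seen[attr] = struct{}{}
		out = append(out, attr)
	}
	return out, err
}

func main() {
	// " k8s.job.name " trims to a duplicate of the later entry; "" is empty.
	attrs, err := sanitize([]string{"k8s.cluster.name", " k8s.job.name ", "k8s.job.name", ""})
	fmt.Println(attrs) // [k8s.cluster.name k8s.job.name]
	fmt.Println(err)   // joined duplicate + empty errors
}
```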
@@ -156,6 +156,12 @@ var expectedConf = &Config{
 		},
 	},

+	OTLPConfig: OTLPConfig{
+		PromoteResourceAttributes: []string{
+			"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
+		},
+	},
+
 	RemoteReadConfigs: []*RemoteReadConfig{
 		{
 			URL: mustParseURL("http://remote1/read"),

@@ -1471,6 +1477,26 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 	require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
 }

+func TestOTLPSanitizeResourceAttributes(t *testing.T) {
+	t.Run("good config", func(t *testing.T) {
+		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
+		require.NoError(t, err)
+
+		out, err := yaml.Marshal(want)
+		require.NoError(t, err)
+		var got Config
+		require.NoError(t, yaml.UnmarshalStrict(out, &got))
+
+		require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes)
+	})
+
+	t.Run("bad config", func(t *testing.T) {
+		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
+		require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
+		require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
+	})
+}
+
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.

@@ -1800,7 +1826,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
 	},
 	{
 		filename: "remote_write_wrong_msg.bad.yml",
config/testdata/conf.good.yml | 3 (vendored)

@@ -45,6 +45,9 @@ remote_write:
     headers:
       name: value

+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"]
+
 remote_read:
   - url: http://remote1/read
     read_recent: true
config/testdata/otlp_sanitize_resource_attributes.bad.yml | 2 (new file, vendored)

@@ -0,0 +1,2 @@
+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""]

config/testdata/otlp_sanitize_resource_attributes.good.yml | 2 (new file, vendored)

@@ -0,0 +1,2 @@
+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]
@@ -1090,7 +1090,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	}

 	for _, tc := range testCases {
-		tc := tc
 		t.Run(tc.title, func(t *testing.T) {
 			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 			defer cancel()
@@ -442,6 +442,15 @@ Unit testing.

+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| <code class="text-nowrap">--junit</code> | File path to store JUnit XML test results. |
+
+
+
+
 ##### `promtool test rules`

 Unit tests for rules.
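Since `--junit` is a flag on the parent `test` command, it goes before the subcommand. For example (hypothetical file names): `promtool test --junit=junit.xml rules rules_test.yml` runs the unit tests as before and additionally writes the results to `junit.xml` in JUnit XML form, which most CI systems can ingest directly.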
@@ -152,6 +152,10 @@ alerting:
 remote_write:
   [ - <remote_write> ... ]

+# Settings related to the OTLP receiver feature.
+otlp:
+  [ promote_resource_attributes: [<string>, ...] | default = [ ] ]
+
 # Settings related to the remote read feature.
 remote_read:
   [ - <remote_read> ... ]

@@ -3397,8 +3401,8 @@
 # It is mutually exclusive with `credentials`.
 [ credentials_file: <filename> ]

-# Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# Optionally configures AWS's Signature Verification 4 signing process to sign requests.
+# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam.
 # To use the default credentials from the AWS SDK, use `sigv4: {}`.
 sigv4:
   # The AWS region. If blank, the region from the default credentials chain

@@ -3651,12 +3655,12 @@ sigv4:
   [ role_arn: <string> ]

 # Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam.
 oauth2:
   [ <oauth2> ]

 # Optional AzureAD configuration.
-# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam.
 azuread:
   # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
   [ cloud: <string> | default = AzurePublic ]

@@ -3676,6 +3680,14 @@ azuread:
   [ sdk:
       [ tenant_id: <string> ] ]

+# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
+# Optional Google Cloud Monitoring configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
+# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
+google_iam:
+  # Service account key with monitoring write permissions.
+  credentials_file: <file_name>
+
 # Configures the remote write request's TLS settings.
 tls_config:
   [ <tls_config> ]
@@ -92,7 +92,7 @@ series: <string>
 #
-# Native histogram notation:
+# Native histograms can be used instead of floating point numbers using the following notation:
 # {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
+# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
 # Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
 # All properties are optional and default to 0. The order is not important. The following properties are supported:
 # - schema (int):

@@ -119,6 +119,8 @@ series: <string>
 #  Observation counts in negative buckets. Each represents an absolute count.
 # - n_offset (int):
 #  The starting index of the first entry in the negative buckets.
+# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge')
+#  The counter reset hint associated with this histogram. Defaults to 'unknown' if not set.
 values: <string>
 ```
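Combining the notation above with the expanding notation, a hypothetical `input_series` entry could repeat the same gauge histogram at successive evaluation steps:

```
- series: 'test_histogram'
  values: '{{schema:0 sum:5 count:4 buckets:[1 2 1] counter_reset_hint:gauge}}x4'
```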
@@ -8,9 +8,15 @@ sort_rank: 1

 Prometheus provides a functional query language called PromQL (Prometheus Query
 Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_ evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in each case; the range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).

 ## Examples

@@ -94,9 +100,7 @@ Examples:

 ## Time series selectors

-Time series selectors are responsible for selecting the times series and raw or inferred sample timestamps and values.
-
-Time series *selectors* are not to be confused with higher level concept of instant and range *queries* that can execute the time series *selectors*. A higher level instant query would evaluate the given selector at one point in time, however the range query would evaluate the selector at multiple different times in between a minimum and maximum timestamp at regular steps.
+These are the basic building-blocks that instruct PromQL what data to fetch.

 ### Instant vector selectors
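As an illustration of the instant/range distinction (the expression is a hypothetical but typical example): evaluating `rate(http_requests_total[5m])` as an instant query returns one sample per matching series, at the single evaluation timestamp; the same expression as a range query with a 1m step over 10 minutes returns up to eleven samples per series, one per step, which is exactly what the "Graph" tab plots.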
@@ -16,8 +16,8 @@ require (

 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
 	github.com/aws/aws-sdk-go v1.53.16 // indirect

@@ -36,7 +36,6 @@ require (
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.17.8 // indirect
-	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect

@@ -1,9 +1,9 @@
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=

@@ -39,7 +39,6 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
 github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=

@@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO
 github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
 github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=

@@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
 github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
go.mod | 18

@@ -17,7 +17,7 @@ require (
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.118.0
+	github.com/digitalocean/godo v1.119.0
 	github.com/docker/docker v27.0.3+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/envoyproxy/go-control-plane v0.12.0

@@ -33,17 +33,17 @@ require (
 	github.com/google/go-cmp v0.6.0
 	github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.13.0
+	github.com/gophercloud/gophercloud v1.14.0
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
 	github.com/hashicorp/consul/api v1.29.2
 	github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
-	github.com/hetznercloud/hcloud-go/v2 v2.10.2
+	github.com/hetznercloud/hcloud-go/v2 v2.12.0
 	github.com/ionos-cloud/sdk-go/v6 v6.1.11
 	github.com/json-iterator/go v1.1.12
 	github.com/klauspost/compress v1.17.9
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.37.0
+	github.com/linode/linodego v1.38.0
 	github.com/miekg/dns v1.1.61
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f

@@ -82,7 +82,7 @@ require (
 	golang.org/x/text v0.16.0
 	golang.org/x/time v0.5.0
 	golang.org/x/tools v0.23.0
-	google.golang.org/api v0.188.0
+	google.golang.org/api v0.189.0
 	google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
 	google.golang.org/grpc v1.65.0
 	google.golang.org/protobuf v1.34.2

@@ -96,9 +96,9 @@ require (
 )

 require (
-	cloud.google.com/go/auth v0.7.0 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
-	cloud.google.com/go/compute/metadata v0.4.0 // indirect
+	cloud.google.com/go/auth v0.7.2 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect

@@ -191,7 +191,7 @@ require (
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
 	golang.org/x/mod v0.19.0 // indirect
 	golang.org/x/term v0.22.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
go.sum | 36

@@ -12,18 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
-cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
+cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
+cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
+cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
-cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=

@@ -143,8 +143,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
-github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/digitalocean/godo v1.119.0 h1:dmFNQwSIAcH3z+FVovHLkazKDC2uA8oOlGvg5+H4vRw=
+github.com/digitalocean/godo v1.119.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=

@@ -334,8 +334,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
 github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
-github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
-github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8=
+github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=

@@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
+github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY=
+github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=

@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
-github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
+github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY=
+github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=

@@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
 google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
 google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
-google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
+google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
+google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1087,8 +1087,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
 google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte {
 	b.WriteByte(labelSep)
 	for i, l := range ls {
 		if i > 0 {
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 		}
 		b.WriteString(l.Name)
-		b.WriteByte(seps[0])
+		b.WriteByte(sep)
 		b.WriteString(l.Value)
 	}
 	return b.Bytes()

@@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 {
 		}

 		b = append(b, v.Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, v.Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b)
 }

@@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 			i++
 		default:
 			b = append(b, ls[i].Name...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			b = append(b, ls[i].Value...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			i++
 			j++
 		}

@@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
 			continue
 		}
 		b = append(b, ls[i].Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, ls[i].Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b), b
 }

@@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
 			i++
 		default:
 			if b.Len() > 1 {
-				b.WriteByte(seps[0])
+				b.WriteByte(sep)
 			}
 			b.WriteString(ls[i].Name)
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 			b.WriteString(ls[i].Value)
 			i++
 			j++

@@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
 			continue
 		}
 		if b.Len() > 1 {
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 		}
 		b.WriteString(ls[i].Name)
-		b.WriteByte(seps[0])
+		b.WriteByte(sep)
 		b.WriteString(ls[i].Value)
 	}
 	return b.Bytes()
@@ -29,10 +29,11 @@ const (
 	BucketLabel  = "le"
 	InstanceName = "instance"

-	labelSep = '\xfe'
+	labelSep = '\xfe' // Used at beginning of `Bytes` return.
+	sep      = '\xff' // Used between labels in `Bytes` and `Hash`.
 )

-var seps = []byte{'\xff'}
+var seps = []byte{sep} // Used with Hash, which has no WriteByte method.

 // Label is a key/value pair of strings.
 type Label struct {
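With the named constants in place, the byte layout that `Bytes` produces is easy to state: a leading `labelSep`, then sorted name/value strings joined by `sep`. A standalone sketch that mirrors (not imports) the code above, for a hypothetical label set `{env="prod", job="api"}`:

```
package main

import "fmt"

const (
	labelSep = '\xfe' // beginning of the Bytes representation
	sep      = '\xff' // between names, values, and pairs
)

func main() {
	// Labels are stored sorted by name, so env comes before job.
	b := []byte{labelSep}
	b = append(b, "env"...)
	b = append(b, sep)
	b = append(b, "prod"...)
	b = append(b, sep)
	b = append(b, "job"...)
	b = append(b, sep)
	b = append(b, "api"...)
	fmt.Printf("%q\n", b) // "\xfeenv\xffprod\xffjob\xffapi"
}
```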
@@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte {
 	b := bytes.NewBuffer(buf[:0])
 	for i := 0; i < len(ls.data); {
 		if i > 0 {
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 		}
 		var name, value string
 		name, i = decodeString(ls.syms, ls.data, i)
 		value, i = decodeString(ls.syms, ls.data, i)
 		b.WriteString(name)
-		b.WriteByte(seps[0])
+		b.WriteByte(sep)
 		b.WriteString(value)
 	}
 	return b.Bytes()

@@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 {
 		}

 		b = append(b, name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		pos = newPos
 	}
 	return xxhash.Sum64(b)

@@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 		}
 		if name == names[j] {
 			b = append(b, name...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			b = append(b, value...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 		}
 	}

@@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
 			continue
 		}
 		b = append(b, name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b), b
 }

@@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
 		}
 		if lName == names[j] {
 			if b.Len() > 1 {
-				b.WriteByte(seps[0])
+				b.WriteByte(sep)
 			}
 			b.WriteString(lName)
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 			b.WriteString(lValue)
 		}
 		pos = newPos

@@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
 		}
 		if j == len(names) || lName != names[j] {
 			if b.Len() > 1 {
-				b.WriteByte(seps[0])
+				b.WriteByte(sep)
 			}
 			b.WriteString(lName)
-			b.WriteByte(seps[0])
+			b.WriteByte(sep)
 			b.WriteString(lValue)
 		}
 		pos = newPos
@@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
 		}
 		if name == names[j] {
 			b = append(b, name...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 			b = append(b, value...)
-			b = append(b, seps[0])
+			b = append(b, sep)
 		}
 	}

@@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
 			continue
 		}
 		b = append(b, name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b), b
 }
@@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
 		}

 		b = append(b, v.Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, v.Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	return xxhash.Sum64(b)
 }

@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
 		}

 		b = append(b, name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		pos = newPos
 	}
 	return xxhash.Sum64(b)

@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
 		}

 		b = append(b, v.Name...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 		b = append(b, v.Value...)
-		b = append(b, seps[0])
+		b = append(b, sep)
 	}
 	if h != nil {
 		return h.Sum64()
@@ -213,6 +213,10 @@ func (re Regexp) IsZero() bool {

 // String returns the original string used to compile the regular expression.
 func (re Regexp) String() string {
+	if re.Regexp == nil {
+		return ""
+	}
+
 	str := re.Regexp.String()
 	// Trim the anchor `^(?:` prefix and `)$` suffix.
 	return str[4 : len(str)-2]
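The slice arithmetic works because the package anchors every pattern at compile time, so the compiled string always carries a fixed four-byte prefix and two-byte suffix. A tiny sketch of the round trip, assuming that anchoring convention:

```
package main

import "fmt"

func main() {
	// Conceptually: NewRegexp("foo.*") compiles to "^(?:foo.*)$",
	// and String() trims the anchors back off.
	str := "^(?:foo.*)$"
	fmt.Println(str[4 : len(str)-2]) // foo.*
}
```

Without the new nil guard, calling String() on a zero-valued Regexp (e.g. one produced by unmarshalling `null`) would panic on the nil dereference.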
@@ -900,3 +900,16 @@ action: replace
 		})
 	}
 }
+
+func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) {
+	var zero Regexp
+
+	marshalled, err := yaml.Marshal(&zero)
+	require.NoError(t, err)
+	require.Equal(t, "null\n", string(marshalled))
+
+	var unmarshalled Regexp
+	err = yaml.Unmarshal(marshalled, &unmarshalled)
+	require.NoError(t, err)
+	require.Nil(t, unmarshalled.Regexp)
+}
@@ -302,15 +302,10 @@ type Exemplar struct {
 	// value represents an exact example value. This can be useful when the exemplar
 	// is attached to a histogram, which only gives an estimated value through buckets.
 	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-	// timestamp represents an optional timestamp of the sample in ms.
+	// timestamp represents the timestamp of the exemplar in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
-	//
-	// Note that the "optional" keyword is omitted due to
-	// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-	// Zero value means value not set. If you need to use exactly zero value for
-	// the timestamp, use 1 millisecond before or after.
 	Timestamp            int64    `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`

@@ -107,15 +107,10 @@ message Exemplar {
 	// value represents an exact example value. This can be useful when the exemplar
 	// is attached to a histogram, which only gives an estimated value through buckets.
 	double value = 2;
-	// timestamp represents an optional timestamp of the sample in ms.
+	// timestamp represents the timestamp of the exemplar in ms.
+	//
+	// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion from/to time.Time to Prometheus timestamp.
-	//
-	// Note that the "optional" keyword is omitted due to
-	// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
-	// Zero value means value not set. If you need to use exactly zero value for
-	// the timestamp, use 1 millisecond before or after.
	int64 timestamp = 3;
 }
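The updated comment points readers at model/timestamp for converting between time.Time and the millisecond timestamps used throughout these protobuf messages. A minimal sketch, assuming that package's FromTime/Time helpers:

```
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/model/timestamp"
)

func main() {
	now := time.Now()
	ms := timestamp.FromTime(now) // time.Time -> Prometheus ms timestamp
	back := timestamp.Time(ms)    // ms timestamp -> time.Time (ms precision)
	fmt.Println(ms, back)
}
```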
@@ -26,7 +26,6 @@ import (
 	"time"

 	"github.com/stretchr/testify/require"
-	"go.uber.org/goleak"

 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"

@@ -51,7 +50,7 @@ const (
 func TestMain(m *testing.M) {
 	// Enable experimental functions testing
 	parser.EnableExperimentalFunctions = true
-	goleak.VerifyTestMain(m)
+	testutil.TolerantVerifyLeak(m)
 }

 func TestQueryConcurrency(t *testing.T) {
@@ -97,9 +97,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
		lastT = samples.Histograms[numSamplesMinusOne].T
		var newAnnos annotations.Annotations
		resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
		annos.Merge(newAnnos)
		if resultHistogram == nil {
			// The histograms are not compatible with each other.
			return enh.Out, annos.Merge(newAnnos)
			return enh.Out, annos
		}
	case len(samples.Floats) > 1:
		numSamplesMinusOne = len(samples.Floats) - 1

@@ -189,6 +190,12 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra

	var annos annotations.Annotations

	// We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point,
	// so check the first and last point now.
	if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
		annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
	}

	// First iteration to find out two things:
	// - What's the smallest relevant schema?
	// - Are all data points histograms?

@@ -241,7 +248,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
	}

	h.CounterResetHint = histogram.GaugeType
	return h.Compact(0), nil
	return h.Compact(0), annos
}

// === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
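For readers unfamiliar with the annotations API used above, a hedged sketch of the pattern this hunk settles on (fold child annotations into the caller's set once, then return that set on every exit path instead of re-merging at each return); the helper name and arguments here are invented for illustration:

	package promqlsketch

	import (
		"github.com/prometheus/prometheus/promql/parser/posrange"
		"github.com/prometheus/prometheus/util/annotations"
	)

	// mergeWarnings mirrors the flow in extrapolatedRate: Add records one
	// warning, Merge folds a child set into the caller's set.
	func mergeWarnings(metricName string, pos posrange.PositionRange) annotations.Annotations {
		var annos, newAnnos annotations.Annotations
		newAnnos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
		annos.Merge(newAnnos)
		return annos
	}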
@@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
	var t int64
	t, f.currentH = f.Iterator.AtHistogram(f.currentH)
	if value.IsStaleNaN(f.currentH.Sum) {
		f.setLastH(f.currentH)
		h = &histogram.Histogram{Sum: f.currentH.Sum}
		return t, h
	}

@@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi
		return t, h
	}

	h.CounterResetHint = f.getResetHint(f.currentH)
	h.Count = f.currentH.Count
	h.Sum = f.currentH.Sum
	returnValue := histogram.Histogram{
		CounterResetHint: f.getResetHint(f.currentH),
		Count:            f.currentH.Count,
		Sum:              f.currentH.Sum,
	}
	returnValue.CopyTo(h)

	f.setLastH(f.currentH)
	return t, h
}

@@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
	var t int64
	t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH)
	if value.IsStaleNaN(f.currentFH.Sum) {
		f.setLastFH(f.currentFH)
		return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum}
	}

@@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram)
		return t, fh
	}

	fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint)
	fh.Count = f.currentFH.Count
	fh.Sum = f.currentFH.Sum
	returnValue := histogram.FloatHistogram{
		CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint),
		Count:            f.currentFH.Count,
		Sum:              f.currentFH.Sum,
	}
	returnValue.CopyTo(fh)

	f.setLastFH(f.currentFH)
	return t, fh
}
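The switch from assigning individual fields to building a local stats histogram and calling CopyTo matters when the caller passes in a reused output histogram. A small sketch of why (the values here are made up; only CopyTo is taken from the change above):

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/model/histogram"
	)

	func main() {
		// dst simulates a reused output histogram that still carries
		// bucket data from a previous sample.
		dst := &histogram.Histogram{Count: 9, PositiveBuckets: []int64{1, 2, 3}}

		// Writing only CounterResetHint, Count and Sum (the old code)
		// would leave dst.PositiveBuckets behind; CopyTo overwrites dst
		// wholesale, so only the stats histogram's contents survive.
		stats := histogram.Histogram{Count: 42, Sum: 4.2}
		stats.CopyTo(dst)
		fmt.Println(dst.Count, dst.Sum, dst.PositiveBuckets) // 42 4.2 []
	}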
@@ -14,62 +14,132 @@
package promql

import (
	"fmt"
	"math"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func TestHistogramStatsDecoding(t *testing.T) {
	histograms := []*histogram.Histogram{
		tsdbutil.GenerateTestHistogram(0),
		tsdbutil.GenerateTestHistogram(1),
		tsdbutil.GenerateTestHistogram(2),
		tsdbutil.GenerateTestHistogram(2),
	}
	histograms[0].CounterResetHint = histogram.NotCounterReset
	histograms[1].CounterResetHint = histogram.UnknownCounterReset
	histograms[2].CounterResetHint = histogram.CounterReset
	histograms[3].CounterResetHint = histogram.UnknownCounterReset

	expectedHints := []histogram.CounterResetHint{
	cases := []struct {
		name          string
		histograms    []*histogram.Histogram
		expectedHints []histogram.CounterResetHint
	}{
		{
			name: "unknown counter reset triggers detection",
			histograms: []*histogram.Histogram{
				tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
				tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset),
				tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
			},
			expectedHints: []histogram.CounterResetHint{
				histogram.NotCounterReset,
				histogram.NotCounterReset,
				histogram.CounterReset,
				histogram.NotCounterReset,
			},
		},
		{
			name: "stale sample before unknown reset hint",
			histograms: []*histogram.Histogram{
				tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset),
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
				{Sum: math.Float64frombits(value.StaleNaN)},
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
			},
			expectedHints: []histogram.CounterResetHint{
				histogram.NotCounterReset,
				histogram.NotCounterReset,
				histogram.UnknownCounterReset,
				histogram.NotCounterReset,
			},
		},
		{
			name: "unknown counter reset at the beginning",
			histograms: []*histogram.Histogram{
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
			},
			expectedHints: []histogram.CounterResetHint{
				histogram.NotCounterReset,
			},
		},
		{
			name: "detect real counter reset",
			histograms: []*histogram.Histogram{
				tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
			},
			expectedHints: []histogram.CounterResetHint{
				histogram.NotCounterReset,
				histogram.CounterReset,
			},
		},
		{
			name: "detect real counter reset after stale NaN",
			histograms: []*histogram.Histogram{
				tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
				{Sum: math.Float64frombits(value.StaleNaN)},
				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
			},
			expectedHints: []histogram.CounterResetHint{
				histogram.NotCounterReset,
				histogram.UnknownCounterReset,
				histogram.CounterReset,
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Run("histogram_stats", func(t *testing.T) {
				decodedStats := make([]*histogram.Histogram, 0)
				statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
				statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
				for statsIterator.Next() != chunkenc.ValNone {
					_, h := statsIterator.AtHistogram(nil)
					decodedStats = append(decodedStats, h)
				}
				for i := 0; i < len(histograms); i++ {
					require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
					require.Equal(t, histograms[i].Count, decodedStats[i].Count)
					require.Equal(t, histograms[i].Sum, decodedStats[i].Sum)
				for i := 0; i < len(tc.histograms); i++ {
					require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i))
					h := tc.histograms[i]
					if value.IsStaleNaN(h.Sum) {
						require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
						require.Equal(t, uint64(0), decodedStats[i].Count)
					} else {
						require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count)
						require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum)
					}
				}
			})
			t.Run("float_histogram_stats", func(t *testing.T) {
				decodedStats := make([]*histogram.FloatHistogram, 0)
				statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil))
				statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil))
				for statsIterator.Next() != chunkenc.ValNone {
					_, h := statsIterator.AtFloatHistogram(nil)
					decodedStats = append(decodedStats, h)
				}
				for i := 0; i < len(histograms); i++ {
					fh := histograms[i].ToFloat(nil)
					require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint)
				for i := 0; i < len(tc.histograms); i++ {
					require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint)
					fh := tc.histograms[i].ToFloat(nil)
					if value.IsStaleNaN(fh.Sum) {
						require.True(t, value.IsStaleNaN(decodedStats[i].Sum))
						require.Equal(t, float64(0), decodedStats[i].Count)
					} else {
						require.Equal(t, fh.Count, decodedStats[i].Count)
						require.Equal(t, fh.Sum, decodedStats[i].Sum)
					}
				}
			})
		})
	}
}

type histogramSeries struct {
@@ -84,6 +84,7 @@ NEGATIVE_BUCKETS_DESC
ZERO_BUCKET_DESC
ZERO_BUCKET_WIDTH_DESC
CUSTOM_VALUES_DESC
COUNTER_RESET_HINT_DESC
%token histogramDescEnd

// Operators.

@@ -149,6 +150,14 @@ START
END
%token preprocessorEnd

// Counter reset hints.
%token counterResetHintsStart
%token <item>
UNKNOWN_COUNTER_RESET
COUNTER_RESET
NOT_COUNTER_RESET
GAUGE_TYPE
%token counterResetHintsEnd

// Start symbols for the generated parser.
%token startSymbolsStart

@@ -163,7 +172,7 @@ START_METRIC_SELECTOR
// Type definitions for grammar rules.
%type <matchers> label_match_list
%type <matcher> label_matcher
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
%type <labels> label_set metric
%type <lblList> label_set_list
%type <label> label_set_item

@@ -839,6 +848,11 @@ histogram_desc_item
			$$ = yylex.(*parser).newMap()
			$$["n_offset"] = $3
		}
		| COUNTER_RESET_HINT_DESC COLON counter_reset_hint
		{
			$$ = yylex.(*parser).newMap()
			$$["counter_reset_hint"] = $3
		}
		;

bucket_set : LEFT_BRACKET bucket_set_list SPACE RIGHT_BRACKET

@@ -862,6 +876,7 @@ bucket_set_list : bucket_set_list SPACE number
		| bucket_set_list error
		;

counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET | GAUGE_TYPE;

/*
 * Keyword lists.
@@ -67,64 +67,71 @@ const NEGATIVE_BUCKETS_DESC = 57376
const ZERO_BUCKET_DESC = 57377
const ZERO_BUCKET_WIDTH_DESC = 57378
const CUSTOM_VALUES_DESC = 57379
const histogramDescEnd = 57380
const operatorsStart = 57381
const ADD = 57382
const DIV = 57383
const EQLC = 57384
const EQL_REGEX = 57385
const GTE = 57386
const GTR = 57387
const LAND = 57388
const LOR = 57389
const LSS = 57390
const LTE = 57391
const LUNLESS = 57392
const MOD = 57393
const MUL = 57394
const NEQ = 57395
const NEQ_REGEX = 57396
const POW = 57397
const SUB = 57398
const AT = 57399
const ATAN2 = 57400
const operatorsEnd = 57401
const aggregatorsStart = 57402
const AVG = 57403
const BOTTOMK = 57404
const COUNT = 57405
const COUNT_VALUES = 57406
const GROUP = 57407
const MAX = 57408
const MIN = 57409
const QUANTILE = 57410
const STDDEV = 57411
const STDVAR = 57412
const SUM = 57413
const TOPK = 57414
const LIMITK = 57415
const LIMIT_RATIO = 57416
const aggregatorsEnd = 57417
const keywordsStart = 57418
const BOOL = 57419
const BY = 57420
const GROUP_LEFT = 57421
const GROUP_RIGHT = 57422
const IGNORING = 57423
const OFFSET = 57424
const ON = 57425
const WITHOUT = 57426
const keywordsEnd = 57427
const preprocessorStart = 57428
const START = 57429
const END = 57430
const preprocessorEnd = 57431
const startSymbolsStart = 57432
const START_METRIC = 57433
const START_SERIES_DESCRIPTION = 57434
const START_EXPRESSION = 57435
const START_METRIC_SELECTOR = 57436
const startSymbolsEnd = 57437
const COUNTER_RESET_HINT_DESC = 57380
const histogramDescEnd = 57381
const operatorsStart = 57382
const ADD = 57383
const DIV = 57384
const EQLC = 57385
const EQL_REGEX = 57386
const GTE = 57387
const GTR = 57388
const LAND = 57389
const LOR = 57390
const LSS = 57391
const LTE = 57392
const LUNLESS = 57393
const MOD = 57394
const MUL = 57395
const NEQ = 57396
const NEQ_REGEX = 57397
const POW = 57398
const SUB = 57399
const AT = 57400
const ATAN2 = 57401
const operatorsEnd = 57402
const aggregatorsStart = 57403
const AVG = 57404
const BOTTOMK = 57405
const COUNT = 57406
const COUNT_VALUES = 57407
const GROUP = 57408
const MAX = 57409
const MIN = 57410
const QUANTILE = 57411
const STDDEV = 57412
const STDVAR = 57413
const SUM = 57414
const TOPK = 57415
const LIMITK = 57416
const LIMIT_RATIO = 57417
const aggregatorsEnd = 57418
const keywordsStart = 57419
const BOOL = 57420
const BY = 57421
const GROUP_LEFT = 57422
const GROUP_RIGHT = 57423
const IGNORING = 57424
const OFFSET = 57425
const ON = 57426
const WITHOUT = 57427
const keywordsEnd = 57428
const preprocessorStart = 57429
const START = 57430
const END = 57431
const preprocessorEnd = 57432
const counterResetHintsStart = 57433
const UNKNOWN_COUNTER_RESET = 57434
const COUNTER_RESET = 57435
const NOT_COUNTER_RESET = 57436
const GAUGE_TYPE = 57437
const counterResetHintsEnd = 57438
const startSymbolsStart = 57439
const START_METRIC = 57440
const START_SERIES_DESCRIPTION = 57441
const START_EXPRESSION = 57442
const START_METRIC_SELECTOR = 57443
const startSymbolsEnd = 57444

var yyToknames = [...]string{
	"$end",

@@ -164,6 +171,7 @@ var yyToknames = [...]string{
	"ZERO_BUCKET_DESC",
	"ZERO_BUCKET_WIDTH_DESC",
	"CUSTOM_VALUES_DESC",
	"COUNTER_RESET_HINT_DESC",
	"histogramDescEnd",
	"operatorsStart",
	"ADD",

@@ -216,6 +224,12 @@ var yyToknames = [...]string{
	"START",
	"END",
	"preprocessorEnd",
	"counterResetHintsStart",
	"UNKNOWN_COUNTER_RESET",
	"COUNTER_RESET",
	"NOT_COUNTER_RESET",
	"GAUGE_TYPE",
	"counterResetHintsEnd",
	"startSymbolsStart",
	"START_METRIC",
	"START_SERIES_DESCRIPTION",
@@ -240,308 +254,313 @@ var yyExca = [...]int16{
	24, 137,
	-2, 0,
	-1, 61,
	2, 175,
	15, 175,
	78, 175,
	84, 175,
	-2, 101,
	-1, 62,
	2, 176,
	15, 176,
	78, 176,
	84, 176,
	-2, 102,
	-1, 63,
	2, 177,
	15, 177,
	78, 177,
	84, 177,
	-2, 104,
	-1, 64,
	2, 178,
	15, 178,
	78, 178,
	84, 178,
	-2, 105,
	-1, 65,
	2, 179,
	15, 179,
	78, 179,
	84, 179,
	-2, 106,
	-1, 66,
	2, 180,
	15, 180,
	78, 180,
	84, 180,
	-2, 111,
	-1, 67,
	79, 180,
	85, 180,
	-2, 101,
	-1, 62,
	2, 181,
	15, 181,
	78, 181,
	84, 181,
	-2, 113,
	-1, 68,
	79, 181,
	85, 181,
	-2, 102,
	-1, 63,
	2, 182,
	15, 182,
	78, 182,
	84, 182,
	-2, 115,
	-1, 69,
	79, 182,
	85, 182,
	-2, 104,
	-1, 64,
	2, 183,
	15, 183,
	78, 183,
	84, 183,
	-2, 116,
	-1, 70,
	79, 183,
	85, 183,
	-2, 105,
	-1, 65,
	2, 184,
	15, 184,
	78, 184,
	84, 184,
	-2, 117,
	-1, 71,
	79, 184,
	85, 184,
	-2, 106,
	-1, 66,
	2, 185,
	15, 185,
	78, 185,
	84, 185,
	-2, 118,
	-1, 72,
	79, 185,
	85, 185,
	-2, 111,
	-1, 67,
	2, 186,
	15, 186,
	78, 186,
	84, 186,
	-2, 119,
	-1, 73,
	79, 186,
	85, 186,
	-2, 113,
	-1, 68,
	2, 187,
	15, 187,
	78, 187,
	84, 187,
	-2, 123,
	-1, 74,
	79, 187,
	85, 187,
	-2, 115,
	-1, 69,
	2, 188,
	15, 188,
	78, 188,
	84, 188,
	79, 188,
	85, 188,
	-2, 116,
	-1, 70,
	2, 189,
	15, 189,
	79, 189,
	85, 189,
	-2, 117,
	-1, 71,
	2, 190,
	15, 190,
	79, 190,
	85, 190,
	-2, 118,
	-1, 72,
	2, 191,
	15, 191,
	79, 191,
	85, 191,
	-2, 119,
	-1, 73,
	2, 192,
	15, 192,
	79, 192,
	85, 192,
	-2, 123,
	-1, 74,
	2, 193,
	15, 193,
	79, 193,
	85, 193,
	-2, 124,
	-1, 200,
	9, 237,
	12, 237,
	13, 237,
	18, 237,
	19, 237,
	25, 237,
	40, 237,
	46, 237,
	47, 237,
	50, 237,
	56, 237,
	61, 237,
	62, 237,
	63, 237,
	64, 237,
	65, 237,
	66, 237,
	67, 237,
	68, 237,
	69, 237,
	70, 237,
	71, 237,
	72, 237,
	73, 237,
	74, 237,
	78, 237,
	82, 237,
	84, 237,
	87, 237,
	88, 237,
	9, 242,
	12, 242,
	13, 242,
	18, 242,
	19, 242,
	25, 242,
	41, 242,
	47, 242,
	48, 242,
	51, 242,
	57, 242,
	62, 242,
	63, 242,
	64, 242,
	65, 242,
	66, 242,
	67, 242,
	68, 242,
	69, 242,
	70, 242,
	71, 242,
	72, 242,
	73, 242,
	74, 242,
	75, 242,
	79, 242,
	83, 242,
	85, 242,
	88, 242,
	89, 242,
	-2, 0,
	-1, 201,
	9, 237,
	12, 237,
	13, 237,
	18, 237,
	19, 237,
	25, 237,
	40, 237,
	46, 237,
	47, 237,
	50, 237,
	56, 237,
	61, 237,
	62, 237,
	63, 237,
	64, 237,
	65, 237,
	66, 237,
	67, 237,
	68, 237,
	69, 237,
	70, 237,
	71, 237,
	72, 237,
	73, 237,
	74, 237,
	78, 237,
	82, 237,
	84, 237,
	87, 237,
	88, 237,
	9, 242,
	12, 242,
	13, 242,
	18, 242,
	19, 242,
	25, 242,
	41, 242,
	47, 242,
	48, 242,
	51, 242,
	57, 242,
	62, 242,
	63, 242,
	64, 242,
	65, 242,
	66, 242,
	67, 242,
	68, 242,
	69, 242,
	70, 242,
	71, 242,
	72, 242,
	73, 242,
	74, 242,
	75, 242,
	79, 242,
	83, 242,
	85, 242,
	88, 242,
	89, 242,
	-2, 0,
}

const yyPrivate = 57344

const yyLast = 728
const yyLast = 763
var yyAct = [...]int16{
	155, 331, 329, 275, 336, 152, 226, 39, 192, 44,
	289, 288, 156, 118, 82, 178, 55, 106, 6, 53,
	77, 109, 56, 133, 108, 22, 54, 110, 107, 172,
	300, 198, 57, 199, 200, 201, 60, 111, 326, 151,
	325, 302, 321, 308, 266, 154, 55, 75, 128, 105,
	291, 300, 160, 18, 19, 309, 54, 20, 307, 218,
	105, 320, 159, 76, 113, 306, 114, 330, 61, 62,
	63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
	73, 74, 112, 161, 180, 13, 87, 89, 265, 24,
	101, 30, 104, 150, 31, 32, 115, 98, 99, 162,
	109, 101, 102, 104, 88, 349, 110, 2, 3, 4,
	5, 264, 196, 149, 111, 163, 160, 103, 337, 173,
	167, 170, 84, 182, 348, 166, 159, 347, 103, 194,
	157, 158, 83, 181, 183, 165, 184, 197, 77, 186,
	185, 195, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 212, 213, 214, 215, 216, 129, 269, 263,
	217, 160, 219, 220, 55, 38, 35, 53, 77, 267,
	56, 159, 270, 22, 54, 121, 297, 188, 7, 259,
	57, 296, 262, 161, 319, 119, 318, 317, 271, 179,
	261, 180, 161, 260, 258, 75, 295, 84, 122, 162,
	187, 18, 19, 316, 268, 20, 315, 83, 162, 286,
	287, 76, 314, 290, 313, 81, 61, 62, 63, 64,
	65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
	182, 86, 292, 13, 55, 10, 312, 24, 311, 30,
	181, 183, 31, 32, 54, 79, 134, 135, 136, 137,
	138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
	148, 310, 127, 36, 126, 1, 121, 298, 299, 301,
	164, 303, 49, 48, 190, 294, 119, 55, 160, 304,
	305, 193, 55, 160, 117, 196, 223, 54, 159, 122,
	222, 228, 54, 159, 293, 350, 50, 47, 46, 169,
	132, 238, 78, 323, 324, 221, 45, 244, 43, 161,
	328, 322, 168, 333, 334, 335, 130, 332, 171, 177,
	339, 338, 341, 340, 176, 162, 125, 342, 343, 42,
	59, 124, 344, 9, 9, 240, 241, 175, 346, 242,
	131, 8, 41, 40, 123, 37, 51, 255, 351, 191,
	229, 231, 233, 234, 235, 243, 245, 248, 249, 250,
	251, 252, 256, 257, 345, 272, 230, 232, 236, 237,
	239, 246, 247, 85, 189, 55, 253, 254, 53, 77,
	224, 56, 80, 120, 22, 54, 153, 58, 227, 52,
	116, 57, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 228, 0, 0, 0, 0, 75, 0, 0, 0,
	0, 238, 18, 19, 0, 0, 20, 244, 0, 0,
	0, 225, 76, 0, 0, 0, 0, 61, 62, 63,
	155, 333, 331, 275, 338, 152, 226, 39, 192, 44,
	290, 289, 156, 118, 82, 178, 106, 55, 109, 105,
	53, 77, 133, 56, 110, 108, 22, 54, 356, 6,
	172, 107, 60, 57, 345, 346, 347, 348, 111, 198,
	328, 199, 200, 201, 327, 154, 303, 355, 266, 75,
	354, 151, 160, 128, 259, 18, 19, 160, 55, 20,
	301, 101, 159, 104, 113, 76, 114, 159, 54, 258,
	61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
	71, 72, 73, 74, 161, 112, 269, 13, 103, 161,
	292, 24, 115, 30, 309, 265, 31, 32, 332, 267,
	162, 270, 109, 223, 323, 162, 150, 222, 110, 308,
	301, 263, 310, 149, 161, 163, 307, 271, 264, 173,
	167, 170, 221, 322, 166, 2, 3, 4, 5, 194,
	162, 157, 158, 179, 262, 180, 184, 197, 165, 186,
	196, 195, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 212, 213, 214, 215, 216, 129, 188, 121,
	217, 121, 219, 220, 55, 38, 218, 53, 77, 119,
	56, 119, 339, 22, 54, 182, 169, 260, 298, 117,
	57, 187, 122, 297, 122, 181, 183, 160, 295, 168,
	261, 180, 111, 77, 164, 55, 75, 159, 296, 357,
	7, 55, 18, 19, 268, 54, 20, 294, 35, 287,
	288, 54, 76, 291, 321, 320, 319, 61, 62, 63,
	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
	74, 0, 0, 0, 13, 240, 241, 0, 24, 242,
	30, 0, 0, 31, 32, 0, 0, 255, 105, 0,
	229, 231, 233, 234, 235, 243, 245, 248, 249, 250,
	251, 252, 256, 257, 0, 0, 230, 232, 236, 237,
	239, 246, 247, 17, 77, 89, 253, 254, 0, 22,
	0, 0, 327, 0, 0, 98, 99, 0, 0, 101,
	0, 104, 88, 277, 278, 276, 283, 285, 282, 284,
	279, 280, 281, 17, 35, 0, 0, 18, 19, 22,
	0, 20, 0, 0, 0, 0, 103, 0, 0, 0,
	0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
	26, 27, 28, 29, 33, 34, 0, 18, 19, 13,
	0, 20, 0, 24, 0, 30, 0, 0, 31, 32,
	0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
	26, 27, 28, 29, 33, 34, 105, 0, 0, 13,
	0, 0, 0, 24, 174, 30, 0, 0, 31, 32,
	0, 0, 0, 0, 0, 105, 0, 0, 0, 0,
	0, 0, 87, 89, 90, 0, 91, 92, 93, 94,
	95, 96, 97, 98, 99, 100, 0, 101, 102, 104,
	88, 87, 89, 90, 0, 91, 92, 93, 94, 95,
	96, 97, 98, 99, 100, 274, 101, 102, 104, 88,
	105, 0, 273, 0, 103, 0, 277, 278, 276, 283,
	285, 282, 284, 279, 280, 281, 0, 0, 0, 105,
	0, 0, 0, 103, 0, 0, 87, 89, 90, 0,
	91, 92, 93, 0, 95, 96, 97, 98, 99, 100,
	74, 182, 293, 318, 13, 160, 317, 316, 24, 315,
	30, 181, 183, 31, 32, 159, 134, 135, 136, 137,
	138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
	148, 314, 313, 55, 105, 84, 84, 299, 300, 302,
	86, 304, 177, 54, 190, 83, 83, 176, 160, 305,
	306, 193, 125, 185, 81, 196, 10, 124, 159, 312,
	175, 311, 89, 50, 8, 36, 79, 228, 37, 78,
	123, 1, 98, 99, 325, 326, 101, 238, 104, 88,
	161, 330, 49, 244, 335, 336, 337, 324, 334, 48,
	47, 341, 340, 343, 342, 127, 162, 126, 59, 349,
	350, 9, 9, 103, 351, 46, 132, 45, 43, 130,
	353, 171, 240, 241, 42, 131, 242, 41, 40, 51,
	191, 352, 272, 85, 255, 358, 189, 229, 231, 233,
	234, 235, 243, 245, 248, 249, 250, 251, 252, 256,
	257, 224, 80, 230, 232, 236, 237, 239, 246, 247,
	344, 120, 55, 253, 254, 53, 77, 153, 56, 274,
	58, 22, 54, 227, 52, 116, 273, 0, 57, 0,
	277, 278, 276, 283, 285, 282, 284, 279, 280, 281,
	286, 0, 0, 0, 75, 0, 0, 0, 0, 0,
	18, 19, 0, 0, 20, 0, 0, 0, 0, 0,
	76, 0, 0, 0, 0, 61, 62, 63, 64, 65,
	66, 67, 68, 69, 70, 71, 72, 73, 74, 228,
	0, 0, 13, 0, 0, 0, 24, 0, 30, 238,
	329, 31, 32, 0, 0, 244, 0, 0, 0, 225,
	0, 277, 278, 276, 283, 285, 282, 284, 279, 280,
	281, 286, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 240, 241, 0, 0, 242, 0,
	0, 0, 17, 77, 0, 105, 255, 0, 22, 229,
	231, 233, 234, 235, 243, 245, 248, 249, 250, 251,
	252, 256, 257, 0, 0, 230, 232, 236, 237, 239,
	246, 247, 87, 89, 0, 253, 254, 18, 19, 0,
	0, 20, 0, 98, 99, 17, 35, 101, 102, 104,
	88, 22, 11, 12, 14, 15, 16, 21, 23, 25,
	26, 27, 28, 29, 33, 34, 0, 0, 0, 13,
	0, 0, 0, 24, 103, 30, 0, 0, 31, 32,
	18, 19, 0, 0, 20, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
	21, 23, 25, 26, 27, 28, 29, 33, 34, 105,
	0, 0, 13, 0, 0, 0, 24, 174, 30, 0,
	0, 31, 32, 0, 0, 0, 0, 0, 105, 0,
	0, 0, 0, 0, 0, 0, 87, 89, 90, 0,
	91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
	0, 101, 102, 104, 88, 87, 89, 90, 0, 91,
	92, 0, 0, 95, 96, 0, 98, 99, 100, 0,
	101, 102, 104, 88, 0, 0, 0, 0, 103, 0,
	92, 93, 94, 95, 96, 97, 98, 99, 100, 0,
	101, 102, 104, 88, 105, 0, 0, 0, 103, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 103,
	0, 0, 0, 105, 0, 0, 0, 103, 0, 0,
	0, 87, 89, 90, 0, 91, 92, 93, 0, 95,
	96, 97, 98, 99, 100, 0, 101, 102, 104, 88,
	87, 89, 90, 0, 91, 92, 0, 0, 95, 96,
	0, 98, 99, 100, 0, 101, 102, 104, 88, 0,
	0, 0, 0, 103, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 103,
}

var yyPact = [...]int16{
	16, 168, 501, 501, 155, 471, -1000, -1000, -1000, 153,
	27, 190, 533, 533, 155, 490, -1000, -1000, -1000, 195,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, 195, -1000, 229, -1000, 581,
	-1000, -1000, -1000, -1000, -1000, 264, -1000, 268, -1000, 614,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, 22, 99, -1000, -1000, 366, -1000, 366, 125,
	-1000, -1000, 23, 177, -1000, -1000, 373, -1000, 373, 180,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 264, -1000, -1000,
	324, -1000, -1000, 260, -1000, 24, -1000, -54, -54, -54,
	-54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
	-54, -54, -54, 37, 43, 268, 99, -57, -1000, 297,
	297, 7, -1000, 562, 35, -1000, 317, -1000, -1000, 187,
	80, -1000, -1000, -1000, 120, -1000, 175, -1000, 269, 366,
	-1000, -50, -45, -1000, 366, 366, 366, 366, 366, 366,
	366, 366, 366, 366, 366, 366, 366, 366, 366, -1000,
	225, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, 107, 107, 284, -1000, -1000, -1000, -1000, 399, -1000,
	-1000, 172, -1000, 581, -1000, -1000, 173, -1000, 157, -1000,
	-1000, -1000, -1000, -1000, 86, -1000, -1000, -1000, -1000, -1000,
	18, 143, 132, -1000, -1000, -1000, 618, 444, 297, 297,
	297, 297, 35, 35, 46, 46, 46, 645, 626, 46,
	46, 645, 35, 35, 46, 35, 444, -1000, 28, -1000,
	-1000, -1000, 273, -1000, 174, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 159, -1000, -1000,
	280, -1000, -1000, 323, -1000, 29, -1000, -56, -56, -56,
	-56, -56, -56, -56, -56, -56, -56, -56, -56, -56,
	-56, -56, -56, 49, 43, 192, 177, -61, -1000, 174,
	174, 8, -1000, 595, 5, -1000, 270, -1000, -1000, 131,
	187, -1000, -1000, -1000, 263, -1000, 156, -1000, 269, 373,
	-1000, -43, -38, -1000, 373, 373, 373, 373, 373, 373,
	373, 373, 373, 373, 373, 373, 373, 373, 373, -1000,
	254, -1000, -1000, 151, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, 226, 226, 101, -1000, -1000, -1000, -1000, 447, -1000,
	-1000, 47, -1000, 614, -1000, -1000, 157, -1000, 109, -1000,
	-1000, -1000, -1000, -1000, 93, -1000, -1000, -1000, -1000, -1000,
	22, 73, 60, -1000, -1000, -1000, 372, 250, 174, 174,
	174, 174, 5, 5, 491, 491, 491, 679, 660, 491,
	491, 679, 5, 5, 491, 5, 250, -1000, 68, -1000,
	-1000, -1000, 186, -1000, 176, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 366,
	-1000, -1000, -1000, -1000, -1000, -1000, 32, 32, 15, 32,
	96, 96, 41, 38, -1000, -1000, 255, 232, 230, 208,
	206, 200, 197, 181, 180, 178, -1000, -1000, -1000, -1000,
	-1000, -1000, 40, -1000, -1000, -1000, 289, -1000, 581, -1000,
	-1000, -1000, 32, -1000, 14, 12, 475, -1000, -1000, -1000,
	11, 152, 107, 107, 107, 104, 104, 11, 104, 11,
	-1000, -1000, -1000, -1000, -1000, 32, 32, -1000, -1000, -1000,
	32, -1000, -1000, -1000, -1000, -1000, -1000, 107, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, 103, -1000, 274, -1000, -1000,
	-1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 373,
	-1000, -1000, -1000, -1000, -1000, -1000, 91, 91, 20, 91,
	124, 124, 92, 95, -1000, -1000, 285, 283, 256, 255,
	233, 231, 230, 227, 210, 209, 208, -1000, -1000, -1000,
	-1000, -1000, -1000, 102, -1000, -1000, -1000, 295, -1000, 614,
	-1000, -1000, -1000, 91, -1000, 18, 14, 443, -1000, -1000,
	-1000, 41, 48, 226, 226, 226, 158, 158, 41, 158,
	41, -58, -1000, -1000, -1000, -1000, -1000, 91, 91, -1000,
	-1000, -1000, 91, -1000, -1000, -1000, -1000, -1000, -1000, 226,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, 26, -1000, 178, -1000, -1000, -1000, -1000,
}

var yyPgo = [...]int16{
	0, 390, 13, 389, 6, 15, 388, 330, 387, 386,
	383, 235, 341, 382, 14, 380, 10, 11, 374, 373,
	8, 365, 3, 4, 364, 2, 1, 0, 349, 12,
	5, 346, 343, 17, 157, 342, 340, 7, 329, 318,
	28, 316, 36, 308, 9, 306, 300, 298, 297, 273,
	272, 296, 265, 263,
	0, 395, 13, 394, 6, 15, 393, 328, 390, 387,
	381, 380, 286, 294, 372, 14, 371, 10, 11, 356,
	353, 8, 352, 3, 4, 351, 2, 1, 0, 350,
	12, 5, 349, 348, 16, 157, 347, 345, 7, 344,
	341, 31, 339, 32, 338, 9, 337, 336, 335, 320,
	319, 312, 293, 301, 295,
}

var yyR1 = [...]int8{
	0, 52, 52, 52, 52, 52, 52, 52, 37, 37,
	37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
	32, 32, 32, 32, 33, 33, 35, 35, 35, 35,
	35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
	35, 35, 34, 36, 36, 46, 46, 41, 41, 41,
	41, 16, 16, 16, 16, 15, 15, 15, 4, 4,
	38, 40, 40, 39, 39, 39, 47, 45, 45, 45,
	31, 31, 31, 9, 9, 43, 49, 49, 49, 49,
	49, 49, 50, 51, 51, 51, 42, 42, 42, 1,
	1, 1, 2, 2, 2, 2, 2, 2, 2, 12,
	12, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	0, 53, 53, 53, 53, 53, 53, 53, 38, 38,
	38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
	33, 33, 33, 33, 34, 34, 36, 36, 36, 36,
	36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
	36, 36, 35, 37, 37, 47, 47, 42, 42, 42,
	42, 17, 17, 17, 17, 16, 16, 16, 4, 4,
	39, 41, 41, 40, 40, 40, 48, 46, 46, 46,
	32, 32, 32, 9, 9, 44, 50, 50, 50, 50,
	50, 50, 51, 52, 52, 52, 43, 43, 43, 1,
	1, 1, 2, 2, 2, 2, 2, 2, 2, 13,
	13, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 11, 11, 11, 11, 13,
	13, 13, 14, 14, 14, 14, 53, 19, 19, 19,
	19, 18, 18, 18, 18, 18, 18, 18, 18, 18,
	28, 28, 28, 20, 20, 20, 20, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	23, 23, 24, 24, 24, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 6,
	7, 7, 7, 7, 7, 12, 12, 12, 12, 14,
	14, 14, 15, 15, 15, 15, 54, 20, 20, 20,
	20, 19, 19, 19, 19, 19, 19, 19, 19, 19,
	29, 29, 29, 21, 21, 21, 21, 22, 22, 22,
	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 24, 24, 25, 25, 25, 11, 11, 11, 11,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 8, 8,
	5, 5, 5, 5, 44, 44, 27, 27, 29, 29,
	30, 30, 26, 25, 25, 48, 10, 17, 17,
	6, 6, 6, 8, 8, 5, 5, 5, 5, 45,
	45, 28, 28, 30, 30, 31, 31, 27, 26, 26,
	49, 10, 18, 18,
}

var yyR2 = [...]int8{
@@ -562,52 +581,53 @@ var yyR2 = [...]int8{
	1, 1, 3, 1, 3, 4, 1, 3, 5, 5,
	1, 1, 1, 4, 3, 3, 2, 3, 1, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 3, 3, 1, 2, 1, 1, 1, 1, 1,
	3, 4, 3, 3, 1, 2, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
	1, 1, 1, 2, 1, 1, 1, 0, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 2, 2, 1, 1, 1, 2, 1,
	1, 1, 0, 1,
}

var yyChk = [...]int16{
	-1000, -52, 91, 92, 93, 94, 2, 10, -12, -7,
	-11, 61, 62, 78, 63, 64, 65, 12, 46, 47,
	50, 66, 18, 67, 82, 68, 69, 70, 71, 72,
	84, 87, 88, 73, 74, 13, -53, -12, 10, -37,
	-32, -35, -38, -43, -44, -45, -47, -48, -49, -50,
	-51, -31, -3, 12, 19, 9, 15, 25, -8, -7,
	-42, 61, 62, 63, 64, 65, 66, 67, 68, 69,
	70, 71, 72, 73, 74, 40, 56, 13, -51, -11,
	-13, 20, -14, 12, 2, -19, 2, 40, 58, 41,
	42, 44, 45, 46, 47, 48, 49, 50, 51, 52,
	53, 55, 56, 82, 57, 14, -33, -40, 2, 78,
	84, 15, -40, -37, -37, -42, -1, 20, -2, 12,
	-10, 2, 25, 20, 7, 2, 4, 2, 24, -34,
	-41, -36, -46, 77, -34, -34, -34, -34, -34, -34,
	-34, -34, -34, -34, -34, -34, -34, -34, -34, -44,
	56, 2, -30, -9, 2, -27, -29, 87, 88, 19,
	9, 40, 56, -44, 2, -40, -33, -16, 15, 2,
	-16, -39, 22, -37, 22, 20, 7, 2, -5, 2,
	4, 53, 43, 54, -5, 20, -14, 25, 2, -18,
	5, -28, -20, 12, -27, -29, 16, -37, 81, 83,
	79, 80, -37, -37, -37, -37, -37, -37, -37, -37,
	-37, -37, -37, -37, -37, -37, -37, -44, 15, -27,
	-27, 21, 6, 2, -15, 22, -4, -6, 2, 61,
	77, 62, 78, 63, 64, 65, 79, 80, 12, 81,
	46, 47, 50, 66, 18, 67, 82, 83, 68, 69,
	70, 71, 72, 87, 88, 58, 73, 74, 22, 7,
	20, -2, 25, 2, 25, 2, 26, 26, -29, 26,
	40, 56, -21, 24, 17, -22, 30, 28, 29, 35,
	36, 37, 33, 31, 34, 32, -16, -16, -17, -16,
	-17, 22, -44, 21, 2, 22, 7, 2, -37, -26,
	19, -26, 26, -26, -20, -20, 24, 17, 2, 17,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	21, 2, 22, -4, -26, 26, 26, 17, -22, -25,
	56, -26, -30, -27, -27, -27, -23, 14, -23, -25,
	-23, -25, -26, -26, -26, -24, -27, 24, 21, 2,
	21, -27,
	-1000, -53, 98, 99, 100, 101, 2, 10, -13, -7,
	-12, 62, 63, 79, 64, 65, 66, 12, 47, 48,
	51, 67, 18, 68, 83, 69, 70, 71, 72, 73,
	85, 88, 89, 74, 75, 13, -54, -13, 10, -38,
	-33, -36, -39, -44, -45, -46, -48, -49, -50, -51,
	-52, -32, -3, 12, 19, 9, 15, 25, -8, -7,
	-43, 62, 63, 64, 65, 66, 67, 68, 69, 70,
	71, 72, 73, 74, 75, 41, 57, 13, -52, -12,
	-14, 20, -15, 12, 2, -20, 2, 41, 59, 42,
	43, 45, 46, 47, 48, 49, 50, 51, 52, 53,
	54, 56, 57, 83, 58, 14, -34, -41, 2, 79,
	85, 15, -41, -38, -38, -43, -1, 20, -2, 12,
	-10, 2, 25, 20, 7, 2, 4, 2, 24, -35,
	-42, -37, -47, 78, -35, -35, -35, -35, -35, -35,
	-35, -35, -35, -35, -35, -35, -35, -35, -35, -45,
	57, 2, -31, -9, 2, -28, -30, 88, 89, 19,
	9, 41, 57, -45, 2, -41, -34, -17, 15, 2,
	-17, -40, 22, -38, 22, 20, 7, 2, -5, 2,
	4, 54, 44, 55, -5, 20, -15, 25, 2, -19,
	5, -29, -21, 12, -28, -30, 16, -38, 82, 84,
	80, 81, -38, -38, -38, -38, -38, -38, -38, -38,
	-38, -38, -38, -38, -38, -38, -38, -45, 15, -28,
	-28, 21, 6, 2, -16, 22, -4, -6, 2, 62,
	78, 63, 79, 64, 65, 66, 80, 81, 12, 82,
	47, 48, 51, 67, 18, 68, 83, 84, 69, 70,
	71, 72, 73, 88, 89, 59, 74, 75, 22, 7,
	20, -2, 25, 2, 25, 2, 26, 26, -30, 26,
	41, 57, -22, 24, 17, -23, 30, 28, 29, 35,
	36, 37, 33, 31, 34, 32, 38, -17, -17, -18,
	-17, -18, 22, -45, 21, 2, 22, 7, 2, -38,
	-27, 19, -27, 26, -27, -21, -21, 24, 17, 2,
	17, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 21, 2, 22, -4, -27, 26, 26, 17,
	-23, -26, 57, -27, -31, -28, -28, -28, -24, 14,
	-24, -26, -24, -26, -11, 92, 93, 94, 95, -27,
	-27, -27, -25, -28, 24, 21, 2, 21, -28,
}

var yyDef = [...]int16{
@@ -616,37 +636,37 @@ var yyDef = [...]int16{
	110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
	120, 121, 122, 123, 124, 0, 2, -2, 3, 4,
	8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
	18, 19, 0, 107, 224, 225, 0, 235, 0, 84,
	18, 19, 0, 107, 229, 230, 0, 240, 0, 84,
	85, -2, -2, -2, -2, -2, -2, -2, -2, -2,
	-2, -2, -2, -2, -2, 218, 219, 0, 5, 99,
	-2, -2, -2, -2, -2, 223, 224, 0, 5, 99,
	0, 127, 130, 0, 135, 136, 140, 43, 43, 43,
	43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
	43, 43, 43, 0, 0, 0, 0, 22, 23, 0,
	0, 0, 60, 0, 82, 83, 0, 88, 90, 0,
	94, 98, 236, 125, 0, 131, 0, 134, 139, 0,
	94, 98, 241, 125, 0, 131, 0, 134, 139, 0,
	42, 47, 48, 44, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 67,
	0, 69, 70, 0, 72, 230, 231, 73, 74, 226,
	227, 0, 0, 0, 81, 20, 21, 24, 0, 54,
	0, 69, 70, 0, 72, 235, 236, 73, 74, 231,
	232, 0, 0, 0, 81, 20, 21, 24, 0, 54,
	25, 0, 62, 64, 66, 86, 0, 91, 0, 97,
	220, 221, 222, 223, 0, 126, 129, 132, 133, 138,
	225, 226, 227, 228, 0, 126, 129, 132, 133, 138,
	141, 143, 146, 150, 151, 152, 0, 26, 0, 0,
	-2, -2, 27, 28, 29, 30, 31, 32, 33, 34,
	35, 36, 37, 38, 39, 40, 41, 68, 0, 228,
	229, 75, 0, 80, 0, 53, 56, 58, 59, 189,
	190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
	210, 211, 212, 213, 214, 215, 216, 217, 61, 65,
	35, 36, 37, 38, 39, 40, 41, 68, 0, 233,
	234, 75, 0, 80, 0, 53, 56, 58, 59, 194,
	195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
	205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
	215, 216, 217, 218, 219, 220, 221, 222, 61, 65,
	87, 89, 92, 96, 93, 95, 0, 0, 0, 0,
	0, 0, 0, 0, 156, 158, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 45, 46, 49, 238,
	50, 71, 0, 77, 79, 51, 0, 57, 63, 142,
	232, 144, 0, 147, 0, 0, 0, 154, 159, 155,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	76, 78, 52, 55, 145, 0, 0, 153, 157, 160,
	0, 234, 161, 162, 163, 164, 165, 0, 166, 167,
	168, 169, 148, 149, 233, 0, 173, 0, 171, 174,
	170, 172,
	0, 0, 0, 0, 0, 0, 0, 45, 46, 49,
	243, 50, 71, 0, 77, 79, 51, 0, 57, 63,
	142, 237, 144, 0, 147, 0, 0, 0, 154, 159,
	155, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 76, 78, 52, 55, 145, 0, 0, 153,
	157, 160, 0, 239, 161, 162, 163, 164, 165, 0,
	166, 167, 168, 169, 170, 176, 177, 178, 179, 148,
	149, 238, 0, 174, 0, 172, 175, 171, 173,
}

var yyTok1 = [...]int8{

@@ -663,7 +683,8 @@ var yyTok2 = [...]int8{
	62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
	72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
	82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
	92, 93, 94, 95,
	92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
	102,
}

var yyTok3 = [...]int8{
@@ -1800,26 +1821,32 @@ yydefault:
			yyVAL.descriptors["n_offset"] = yyDollar[3].int
		}
	case 170:
		yyDollar = yyS[yypt-4 : yypt+1]
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = yyDollar[2].bucket_set
			yyVAL.descriptors = yylex.(*parser).newMap()
			yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
		}
	case 171:
		yyDollar = yyS[yypt-3 : yypt+1]
		yyDollar = yyS[yypt-4 : yypt+1]
		{
			yyVAL.bucket_set = yyDollar[2].bucket_set
		}
	case 172:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
			yyVAL.bucket_set = yyDollar[2].bucket_set
		}
	case 173:
		yyDollar = yyS[yypt-3 : yypt+1]
		{
			yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
		}
	case 174:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.bucket_set = []float64{yyDollar[1].float}
		}
	case 224:
	case 229:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.node = &NumberLiteral{

@@ -1827,7 +1854,7 @@ yydefault:
				PosRange: yyDollar[1].item.PositionRange(),
			}
		}
	case 225:
	case 230:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			var err error

@@ -1841,12 +1868,12 @@ yydefault:
				PosRange: yyDollar[1].item.PositionRange(),
			}
		}
	case 226:
	case 231:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
		}
	case 227:
	case 232:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			var err error

@@ -1857,17 +1884,17 @@ yydefault:
			}
			yyVAL.float = dur.Seconds()
		}
	case 228:
	case 233:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.float = yyDollar[2].float
		}
	case 229:
	case 234:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.float = -yyDollar[2].float
		}
	case 232:
	case 237:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			var err error

@@ -1876,17 +1903,17 @@ yydefault:
				yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
			}
		}
	case 233:
	case 238:
		yyDollar = yyS[yypt-2 : yypt+1]
		{
			yyVAL.int = -int64(yyDollar[2].uint)
		}
	case 234:
	case 239:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.int = int64(yyDollar[1].uint)
		}
	case 235:
	case 240:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.node = &StringLiteral{

@@ -1894,7 +1921,7 @@ yydefault:
				PosRange: yyDollar[1].item.PositionRange(),
			}
		}
	case 236:
	case 241:
		yyDollar = yyS[yypt-1 : yypt+1]
		{
			yyVAL.item = Item{

@@ -1903,7 +1930,7 @@ yydefault:
				Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
			}
		}
	case 237:
	case 242:
		yyDollar = yyS[yypt-0 : yypt+1]
		{
			yyVAL.strings = nil
@@ -147,6 +147,14 @@ var histogramDesc = map[string]ItemType{
	"z_bucket":           ZERO_BUCKET_DESC,
	"z_bucket_w":         ZERO_BUCKET_WIDTH_DESC,
	"custom_values":      CUSTOM_VALUES_DESC,
	"counter_reset_hint": COUNTER_RESET_HINT_DESC,
}

var counterResetHints = map[string]ItemType{
	"unknown":   UNKNOWN_COUNTER_RESET,
	"reset":     COUNTER_RESET,
	"not_reset": NOT_COUNTER_RESET,
	"gauge":     GAUGE_TYPE,
}

// ItemTypeStr is the default string representations for common Items. It does not

@@ -585,6 +593,11 @@ Loop:
				return lexHistogram
			}
		}
		if desc, ok := counterResetHints[strings.ToLower(word)]; ok {
			l.emit(desc)
			return lexHistogram
		}

		l.errorf("bad histogram descriptor found: %q", word)
		break Loop
	}
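Because the lexer lowercases the word before the lookup, the four hint values are accepted case-insensitively. A quick hedged sketch of exercising the new descriptor through the series-description entry point used by the tests further down:

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/promql/parser"
	)

	func main() {
		// "GAUGE" would parse too, since the lexer lowercases the word.
		_, vals, err := parser.ParseSeriesDesc(`{} {{counter_reset_hint:gauge}}`)
		if err != nil {
			panic(err)
		}
		fmt.Println(vals) // one histogram sample with the gauge hint set
	}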
@@ -580,6 +580,28 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
		}
	}

	val, ok = (*desc)["counter_reset_hint"]
	if ok {
		resetHint, ok := val.(Item)

		if ok {
			switch resetHint.Typ {
			case UNKNOWN_COUNTER_RESET:
				output.CounterResetHint = histogram.UnknownCounterReset
			case COUNTER_RESET:
				output.CounterResetHint = histogram.CounterReset
			case NOT_COUNTER_RESET:
				output.CounterResetHint = histogram.NotCounterReset
			case GAUGE_TYPE:
				output.CounterResetHint = histogram.GaugeType
			default:
				p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: unknown value %v", resetHint.Typ)
			}
		} else {
			p.addParseErrf(p.yyParser.lval.item.PositionRange(), "error parsing counter_reset_hint: %v", val)
		}
	}

	buckets, spans := p.buildHistogramBucketsAndSpans(desc, "buckets", "offset")
	output.PositiveBuckets = buckets
	output.PositiveSpans = spans
@@ -4038,7 +4038,7 @@ func TestParseHistogramSeries(t *testing.T) {
		},
		{
			name: "all properties used",
			input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}`,
			input: `{} {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}`,
			expected: []histogram.FloatHistogram{{
				Schema: 1,
				Sum:    -0.3,

@@ -4049,11 +4049,12 @@ func TestParseHistogramSeries(t *testing.T) {
				PositiveSpans:    []histogram.Span{{Offset: -3, Length: 3}},
				NegativeBuckets:  []float64{4.1, 5},
				NegativeSpans:    []histogram.Span{{Offset: -5, Length: 2}},
				CounterResetHint: histogram.GaugeType,
			}},
		},
		{
			name: "all properties used - with spaces",
			input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 }}`,
			input: `{} {{schema:1 sum:0.3 count:3 z_bucket:7 z_bucket_w:5 buckets:[5 10 7 ] offset:-3 n_buckets:[4 5] n_offset:5 counter_reset_hint:gauge }}`,
			expected: []histogram.FloatHistogram{{
				Schema: 1,
				Sum:    0.3,

@@ -4064,6 +4065,7 @@ func TestParseHistogramSeries(t *testing.T) {
				PositiveSpans:    []histogram.Span{{Offset: -3, Length: 3}},
				NegativeBuckets:  []float64{4, 5},
				NegativeSpans:    []histogram.Span{{Offset: 5, Length: 2}},
				CounterResetHint: histogram.GaugeType,
			}},
		},
		{

@@ -4250,6 +4252,39 @@ func TestParseHistogramSeries(t *testing.T) {
			input:         `{} {{ schema:1}}`,
			expectedError: `1:7: parse error: unexpected "<Item 57372>" "schema" in series values`,
		},
		{
			name:          "invalid counter reset hint value",
			input:         `{} {{counter_reset_hint:foo}}`,
			expectedError: `1:25: parse error: bad histogram descriptor found: "foo"`,
		},
		{
			name:  "'unknown' counter reset hint value",
			input: `{} {{counter_reset_hint:unknown}}`,
			expected: []histogram.FloatHistogram{{
				CounterResetHint: histogram.UnknownCounterReset,
			}},
		},
		{
			name:  "'reset' counter reset hint value",
			input: `{} {{counter_reset_hint:reset}}`,
			expected: []histogram.FloatHistogram{{
				CounterResetHint: histogram.CounterReset,
			}},
		},
		{
			name:  "'not_reset' counter reset hint value",
			input: `{} {{counter_reset_hint:not_reset}}`,
			expected: []histogram.FloatHistogram{{
				CounterResetHint: histogram.NotCounterReset,
			}},
		},
		{
			name:  "'gauge' counter reset hint value",
			input: `{} {{counter_reset_hint:gauge}}`,
			expected: []histogram.FloatHistogram{{
				CounterResetHint: histogram.GaugeType,
			}},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			_, vals, err := ParseSeriesDesc(test.input)
@@ -748,3 +748,17 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram)

eval instant at 5m sum(custom_buckets_histogram)
    {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}}

clear

# Test 'this native histogram metric is not a gauge' warning for rate
load 30s
    some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}

# Test the case where we only have two points for rate
eval_warn instant at 30s rate(some_metric[30s])
    {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}

# Test the case where we have more than two points for rate
eval_warn instant at 1m rate(some_metric[1m])
    {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
@@ -621,14 +621,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
			}
		}

		// If the rule has no dependencies, it can run concurrently because no other rules in this group depend on its output.
		// Try run concurrently if there are slots available.
		if ctrl := g.concurrencyController; isRuleEligibleForConcurrentExecution(rule) && ctrl.Allow() {
		if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
			wg.Add(1)

			go eval(i, rule, func() {
				wg.Done()
				ctrl.Done()
				ctrl.Done(ctx)
			})
		} else {
			eval(i, rule, nil)

@@ -1094,7 +1092,3 @@ func buildDependencyMap(rules []Rule) dependencyMap {

	return dependencies
}

func isRuleEligibleForConcurrentExecution(rule Rule) bool {
	return rule.NoDependentRules() && rule.NoDependencyRules()
}
@@ -457,67 +457,47 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
	// Allow determines whether any concurrent evaluation slots are available.
	// If Allow() returns true, then Done() must be called to release the acquired slot.
	Allow() bool
	// Allow determines if the given rule is allowed to be evaluated concurrently.
	// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done.
	// It is important that both *Group and Rule are not retained and only be used for the duration of the call.
	Allow(ctx context.Context, group *Group, rule Rule) bool

	// Done releases a concurrent evaluation slot.
	Done()
	Done(ctx context.Context)
}

// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
	sema *semaphore.Weighted
	depMapsMu sync.Mutex
	depMaps   map[*Group]dependencyMap
}

func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
	return &concurrentRuleEvalController{
		sema: semaphore.NewWeighted(maxConcurrency),
		depMaps: map[*Group]dependencyMap{},
	}
}

func (c *concurrentRuleEvalController) RuleEligible(g *Group, r Rule) bool {
	c.depMapsMu.Lock()
	defer c.depMapsMu.Unlock()

	depMap, found := c.depMaps[g]
	if !found {
		depMap = buildDependencyMap(g.rules)
		c.depMaps[g] = depMap
	}

	return depMap.isIndependent(r)
}

func (c *concurrentRuleEvalController) Allow() bool {
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
	// To allow a rule to be executed concurrently, we need 3 conditions:
	// 1. The rule must not have any rules that depend on it.
	// 2. The rule itself must not depend on any other rules.
	// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
	if rule.NoDependentRules() && rule.NoDependencyRules() {
		return c.sema.TryAcquire(1)
	}

	return false
}

func (c *concurrentRuleEvalController) Done() {
func (c *concurrentRuleEvalController) Done(_ context.Context) {
	c.sema.Release(1)
}

func (c *concurrentRuleEvalController) Invalidate() {
	c.depMapsMu.Lock()
	defer c.depMapsMu.Unlock()

	// Clear out the memoized dependency maps because some or all groups may have been updated.
	c.depMaps = map[*Group]dependencyMap{}
}

// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}

func (c sequentialRuleEvalController) RuleEligible(_ *Group, _ Rule) bool {
func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
	return false
}

func (c sequentialRuleEvalController) Allow() bool {
	return false
}

func (c sequentialRuleEvalController) Done() {}
func (c sequentialRuleEvalController) Invalidate() {}
func (c sequentialRuleEvalController) Done(_ context.Context) {}
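The interface change means a custom controller now receives the group and rule directly. A hedged sketch of what a third-party implementation of the revised interface might look like; the type name and wiring here are invented, while the dependency checks mirror the built-in controller above:

	package rulessketch

	import (
		"context"

		"golang.org/x/sync/semaphore"

		"github.com/prometheus/prometheus/rules"
	)

	// boundedController is a hypothetical RuleConcurrencyController: it
	// admits a rule only when it has no inter-rule dependencies and a
	// semaphore slot is free.
	type boundedController struct {
		sema *semaphore.Weighted
	}

	func (c *boundedController) Allow(_ context.Context, _ *rules.Group, r rules.Rule) bool {
		return r.NoDependentRules() && r.NoDependencyRules() && c.sema.TryAcquire(1)
	}

	func (c *boundedController) Done(_ context.Context) {
		c.sema.Release(1)
	}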
@@ -32,7 +32,6 @@ import (
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"go.uber.org/goleak"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/model/labels"

@@ -50,7 +49,7 @@ import (
)

func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
	prom_testutil.TolerantVerifyLeak(m)
}

func TestAlertingRule(t *testing.T) {
@@ -37,6 +37,7 @@ import (
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage/remote/azuread"
	"github.com/prometheus/prometheus/storage/remote/googleiam"
)

const maxErrMsgLen = 1024

@@ -131,6 +132,7 @@ type ClientConfig struct {
	HTTPClientConfig config_util.HTTPClientConfig
	SigV4Config      *sigv4.SigV4Config
	AzureADConfig    *azuread.AzureADConfig
	GoogleIAMConfig  *googleiam.Config
	Headers          map[string]string
	RetryOnRateLimit bool
	WriteProtoMsg    config.RemoteWriteProtoMsg

@@ -192,6 +194,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
		}
	}

	if conf.GoogleIAMConfig != nil {
		t, err = googleiam.NewRoundTripper(conf.GoogleIAMConfig, t)
		if err != nil {
			return nil, err
		}
	}

	writeProtoMsg := config.RemoteWriteProtoMsgV1
	if conf.WriteProtoMsg != "" {
		writeProtoMsg = conf.WriteProtoMsg
storage/remote/googleiam/googleiam.go (new file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package googleiam provides an http.RoundTripper that attaches a Google Cloud accessToken
// to remote write requests.
package googleiam

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
	apihttp "google.golang.org/api/transport/http"
)

type Config struct {
	CredentialsFile string `yaml:"credentials_file,omitempty"`
}

// NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls
// using either a credentials file or the default credentials.
func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, error) {
	if next == nil {
		next = http.DefaultTransport
	}
	const scopes = "https://www.googleapis.com/auth/monitoring.write"
	ctx := context.Background()
	opts := []option.ClientOption{
		option.WithScopes(scopes),
	}
	if cfg.CredentialsFile != "" {
		opts = append(opts, option.WithCredentialsFile(cfg.CredentialsFile))
	} else {
		creds, err := google.FindDefaultCredentials(ctx, scopes)
		if err != nil {
			return nil, fmt.Errorf("error finding default Google credentials: %w", err)
		}
		opts = append(opts, option.WithCredentials(creds))
	}

	return apihttp.NewTransport(ctx, next, opts...)
}
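Outside NewWriteClient (shown in the client.go hunk above), the round tripper can wrap any HTTP transport. A minimal illustrative wiring follows; the credentials path is a placeholder and the log-and-exit error handling is an assumption for brevity (imports needed: "log", "net/http", and the package above).

// Sketch: authorize an arbitrary *http.Client for Google Cloud Monitoring
// writes. Passing nil as next falls back to http.DefaultTransport.
cfg := &googleiam.Config{CredentialsFile: "/path/to/service-account.json"} // placeholder path
rt, err := googleiam.NewRoundTripper(cfg, nil)
if err != nil {
	log.Fatal(err)
}
client := &http.Client{Transport: rt} // use this client for remote-write requests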
@@ -65,14 +65,14 @@ type bucketBoundsData struct {
 	bound float64
 }

-// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds
+// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds.
 type byBucketBoundsData []bucketBoundsData

 func (m byBucketBoundsData) Len() int           { return len(m) }
 func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
 func (m byBucketBoundsData) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }

-// ByLabelName enables the usage of sort.Sort() with a slice of labels
+// ByLabelName enables the usage of sort.Sort() with a slice of labels.
 type ByLabelName []prompb.Label

 func (a ByLabelName) Len() int { return len(a) }
@@ -115,14 +115,23 @@ var seps = []byte{'\xff'}
 // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
 // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
 // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
-func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
+// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
+func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings,
 	ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
 	resourceAttrs := resource.Attributes()
 	serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
 	instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)

+	promotedAttrs := make([]prompb.Label, 0, len(settings.PromoteResourceAttributes))
+	for _, name := range settings.PromoteResourceAttributes {
+		if value, exists := resourceAttrs.Get(name); exists {
+			promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
+		}
+	}
+	sort.Stable(ByLabelName(promotedAttrs))
+
 	// Calculate the maximum possible number of labels we could return so we can preallocate l
-	maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
+	maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + len(extras)/2

 	if haveServiceName {
 		maxLabelCount++

@@ -132,9 +141,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
 		maxLabelCount++
 	}

-	// map ensures no duplicate label name
-	l := make(map[string]string, maxLabelCount)
-
 	// Ensure attributes are sorted by key for consistent merging of keys which
 	// collide when sanitized.
 	labels := make([]prompb.Label, 0, maxLabelCount)

@@ -148,6 +154,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
 	})
 	sort.Stable(ByLabelName(labels))

+	// map ensures no duplicate label names.
+	l := make(map[string]string, maxLabelCount)
 	for _, label := range labels {
 		var finalKey = prometheustranslator.NormalizeLabel(label.Name)
 		if existingValue, alreadyExists := l[finalKey]; alreadyExists {

@@ -157,6 +165,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
 		}
 	}

+	for _, lbl := range promotedAttrs {
+		normalized := prometheustranslator.NormalizeLabel(lbl.Name)
+		if _, exists := l[normalized]; !exists {
+			l[normalized] = lbl.Value
+		}
+	}
+
 	// Map service.name + service.namespace to job
 	if haveServiceName {
 		val := serviceName.AsString()

@@ -169,7 +184,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
 	if haveInstanceID {
 		l[model.InstanceLabel] = instance.AsString()
 	}
-	for key, value := range externalLabels {
+	for key, value := range settings.ExternalLabels {
 		// External labels have already been sanitized
 		if _, alreadyExists := l[key]; alreadyExists {
 			// Skip external labels if they are overridden by metric attributes

@@ -232,7 +247,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra
 	for x := 0; x < dataPoints.Len(); x++ {
 		pt := dataPoints.At(x)
 		timestamp := convertTimeStamp(pt.Timestamp())
-		baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
+		baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)

 		// If the sum is unset, it indicates the _sum metric point should be
 		// omitted

@@ -408,7 +423,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat
 	for x := 0; x < dataPoints.Len(); x++ {
 		pt := dataPoints.At(x)
 		timestamp := convertTimeStamp(pt.Timestamp())
-		baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
+		baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)

 		// treat sum as a sample in an individual TimeSeries
 		sum := &prompb.Sample{

@@ -554,7 +569,8 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
 		name = settings.Namespace + "_" + name
 	}

-	labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
+	settings.PromoteResourceAttributes = nil
+	labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name)
 	haveIdentifier := false
 	for _, l := range labels {
 		if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
@@ -0,0 +1,161 @@ (new test file, package prometheusremotewrite)
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheusremotewrite

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/prometheus/prometheus/prompb"
)

func TestCreateAttributes(t *testing.T) {
	resourceAttrs := map[string]string{
		"service.name":        "service name",
		"service.instance.id": "service ID",
		"existent-attr":       "resource value",
		// This one is for testing conflict with metric attribute.
		"metric-attr": "resource value",
		// This one is for testing conflict with auto-generated job attribute.
		"job": "resource value",
		// This one is for testing conflict with auto-generated instance attribute.
		"instance": "resource value",
	}

	resource := pcommon.NewResource()
	for k, v := range resourceAttrs {
		resource.Attributes().PutStr(k, v)
	}
	attrs := pcommon.NewMap()
	attrs.PutStr("__name__", "test_metric")
	attrs.PutStr("metric-attr", "metric value")

	testCases := []struct {
		name                      string
		promoteResourceAttributes []string
		expectedLabels            []prompb.Label
	}{
		{
			name:                      "Successful conversion without resource attribute promotion",
			promoteResourceAttributes: nil,
			expectedLabels: []prompb.Label{
				{Name: "__name__", Value: "test_metric"},
				{Name: "instance", Value: "service ID"},
				{Name: "job", Value: "service name"},
				{Name: "metric_attr", Value: "metric value"},
			},
		},
		{
			name:                      "Successful conversion with resource attribute promotion",
			promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"},
			expectedLabels: []prompb.Label{
				{Name: "__name__", Value: "test_metric"},
				{Name: "instance", Value: "service ID"},
				{Name: "job", Value: "service name"},
				{Name: "metric_attr", Value: "metric value"},
				{Name: "existent_attr", Value: "resource value"},
			},
		},
		{
			name:                      "Successful conversion with resource attribute promotion, conflicting resource attributes are ignored",
			promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"},
			expectedLabels: []prompb.Label{
				{Name: "__name__", Value: "test_metric"},
				{Name: "instance", Value: "service ID"},
				{Name: "job", Value: "service name"},
				{Name: "existent_attr", Value: "resource value"},
				{Name: "metric_attr", Value: "metric value"},
			},
		},
		{
			name:                      "Successful conversion with resource attribute promotion, attributes are only promoted once",
			promoteResourceAttributes: []string{"existent-attr", "existent-attr"},
			expectedLabels: []prompb.Label{
				{Name: "__name__", Value: "test_metric"},
				{Name: "instance", Value: "service ID"},
				{Name: "job", Value: "service name"},
				{Name: "existent_attr", Value: "resource value"},
				{Name: "metric_attr", Value: "metric value"},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			settings := Settings{
				PromoteResourceAttributes: tc.promoteResourceAttributes,
			}
			lbls := createAttributes(resource, attrs, settings, nil, false)

			assert.ElementsMatch(t, lbls, tc.expectedLabels)
		})
	}
}
@@ -45,7 +45,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
 		lbls := createAttributes(
 			resource,
 			pt.Attributes(),
-			settings.ExternalLabels,
+			settings,
 			nil,
 			true,
 			model.MetricNameLabel,
@@ -36,6 +36,7 @@ type Settings struct {
 	ExportCreatedMetric       bool
 	AddMetricSuffixes         bool
 	SendMetadata              bool
+	PromoteResourceAttributes []string
 }

 // PrometheusConverter converts from OTel write format to Prometheus remote write format.
@@ -34,7 +34,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number
 		labels := createAttributes(
 			resource,
 			pt.Attributes(),
-			settings.ExternalLabels,
+			settings,
 			nil,
 			true,
 			model.MetricNameLabel,

@@ -64,7 +64,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa
 		lbls := createAttributes(
 			resource,
 			pt.Attributes(),
-			settings.ExternalLabels,
+			settings,
 			nil,
 			true,
 			model.MetricNameLabel,
@@ -1109,9 +1109,9 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
 	if desiredShards == t.numShards {
 		return false
 	}
-	// We shouldn't reshard if Prometheus hasn't been able to send to the
-	// remote endpoint successfully within some period of time.
-	minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix()
+	// We shouldn't reshard if Prometheus hasn't been able to send
+	// since the last time it checked if it should reshard.
+	minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix()
 	lsts := t.lastSendTimestamp.Load()
 	if lsts < minSendTimestamp {
 		level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp)
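Concretely, the new guard ties the send-staleness threshold to the reshard check interval rather than the batch send deadline. A small sketch with assumed numbers (shardUpdateDuration is defined elsewhere in this file; 10 seconds is an assumption for illustration):

// Sketch of the guard above with concrete values, assuming
// shardUpdateDuration = 10 * time.Second.
now := time.Now()
minSendTimestamp := now.Add(-1 * shardUpdateDuration).Unix() // i.e. now - 10s
lastSend := now.Add(-30 * time.Second).Unix()                // last success 30s ago
fmt.Println(lastSend < minSendTimestamp)                     // true -> resharding is skipped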
@@ -703,32 +703,35 @@ func TestShouldReshard(t *testing.T) {
 		startingShards                           int
 		samplesIn, samplesOut, lastSendTimestamp int64
 		expectedToReshard                        bool
+		sendDeadline                             model.Duration
 	}
 	cases := []testcase{
 		{
-			// Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago.
+			// resharding shouldn't take place if we haven't successfully sent
+			// since the last shardUpdateDuration, even if the send deadline is very low
 			startingShards:    10,
 			samplesIn:         1000,
 			samplesOut:        10,
-			lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second),
+			lastSendTimestamp: time.Now().Unix() - int64(shardUpdateDuration),
 			expectedToReshard: false,
+			sendDeadline:      model.Duration(100 * time.Millisecond),
 		},
 		{
-			startingShards:    5,
+			startingShards:    10,
 			samplesIn:         1000,
 			samplesOut:        10,
 			lastSendTimestamp: time.Now().Unix(),
 			expectedToReshard: true,
+			sendDeadline:      config.DefaultQueueConfig.BatchSendDeadline,
 		},
 	}

 	for _, c := range cases {
-		_, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
+		_, m := newTestClientAndQueueManager(t, time.Duration(c.sendDeadline), config.RemoteWriteProtoMsgV1)
 		m.numShards = c.startingShards
 		m.dataIn.incr(c.samplesIn)
 		m.dataOut.incr(c.samplesOut)
 		m.lastSendTimestamp.Store(c.lastSendTimestamp)

 		m.Start()

 		desiredShards := m.calculateDesiredShards()
@@ -176,6 +176,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 			HTTPClientConfig: rwConf.HTTPClientConfig,
 			SigV4Config:      rwConf.SigV4Config,
 			AzureADConfig:    rwConf.AzureADConfig,
+			GoogleIAMConfig:  rwConf.GoogleIAMConfig,
 			Headers:          rwConf.Headers,
 			RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
 		})
@@ -472,7 +472,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *

 // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
 // writes them to the provided appendable.
-func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
+func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler {
 	rwHandler := &writeHandler{
 		logger:     logger,
 		appendable: appendable,

@@ -481,12 +481,14 @@ func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.
 	return &otlpWriteHandler{
 		logger:     logger,
 		rwHandler:  rwHandler,
+		configFunc: configFunc,
 	}
 }

 type otlpWriteHandler struct {
 	logger     log.Logger
 	rwHandler  *writeHandler
+	configFunc func() config.Config
 }

 func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {

@@ -497,9 +499,12 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	otlpCfg := h.configFunc().OTLPConfig
+
 	converter := otlptranslator.NewPrometheusConverter()
 	if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
 		AddMetricSuffixes:         true,
+		PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
 	}); err != nil {
 		level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
 	}
@@ -379,7 +379,11 @@ func TestOTLPWriteHandler(t *testing.T) {
 	req.Header.Set("Content-Type", "application/x-protobuf")

 	appendable := &mockAppendable{}
-	handler := NewOTLPWriteHandler(nil, appendable)
+	handler := NewOTLPWriteHandler(nil, appendable, func() config.Config {
+		return config.Config{
+			OTLPConfig: config.DefaultOTLPConfig,
+		}
+	})

 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -126,6 +126,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {

 type histogramTest struct {
 	samples                     []chunks.Sample
+	expectedSamples             []chunks.Sample
 	expectedCounterResetHeaders []chunkenc.CounterResetHeader
 }
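The fixtures added in the next hunk (h1ExtraBuckets and friends) lean on the span encoding of native histograms: each span is an [offset, length] pair, where the first offset is the starting bucket index and later offsets are gaps from the end of the previous span. A small self-contained sketch, in which the local span type is only a stand-in for histogram.Span:

package main

import "fmt"

// span stands in for histogram.Span: offset is the starting index for the
// first span and the gap to the previous span afterwards; length counts
// consecutive buckets.
type span struct{ offset, length int32 }

// bucketIndices expands spans into the absolute bucket indices they cover.
func bucketIndices(spans []span) []int32 {
	var idxs []int32
	idx := int32(0)
	for _, s := range spans {
		idx += s.offset
		for i := int32(0); i < s.length; i++ {
			idxs = append(idxs, idx)
			idx++
		}
	}
	return idxs
}

func main() {
	fmt.Println(bucketIndices([]span{{0, 2}}))           // [0 1]: h1's two buckets
	fmt.Println(bucketIndices([]span{{-10, 1}, {9, 2}})) // [-10 0 1]: h1 plus an empty bucket at index -10
}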
@@ -141,6 +142,32 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
 	}
+	// h1 but with an extra empty bucket at offset -10.
+	// This can happen if h1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
+	h1ExtraBuckets := &histogram.Histogram{
+		Count:         7,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: -10, Length: 1},
+			{Offset: 9, Length: 2},
+		},
+		PositiveBuckets: []int64{0, 2, 1}, // Abs: 0, 2, 3
+	}
+	h1Recoded := &histogram.Histogram{
+		Count:         7,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
+	}
 	// Appendable to h1.
 	h2 := &histogram.Histogram{
 		Count: 12,

@@ -179,6 +206,32 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		PositiveBuckets: []float64{3, 1},
 	}
+	// fh1 but with an extra empty bucket at offset -10.
+	// This can happen if fh1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
+	fh1ExtraBuckets := &histogram.FloatHistogram{
+		Count:         6,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: -10, Length: 1},
+			{Offset: 9, Length: 2},
+		},
+		PositiveBuckets: []float64{0, 3, 1},
+	}
+	fh1Recoded := &histogram.FloatHistogram{
+		Count:         6,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{3, 1, 0, 0},
+	}
 	// Appendable to fh1.
 	fh2 := &histogram.FloatHistogram{
 		Count: 17,

@@ -219,6 +272,20 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
 	}
+	// gh1 recoded to add extra empty buckets at the end.
+	gh1Recoded := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            7,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
+	}
 	gh2 := &histogram.Histogram{
 		CounterResetHint: histogram.GaugeType,
 		Count:            12,

@@ -246,6 +313,20 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		PositiveBuckets: []float64{3, 1},
 	}
+	// gfh1 recoded to add an extra empty bucket at the end.
+	gfh1Recoded := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            6,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{3, 1, 0, 0},
+	}
 	gfh2 := &histogram.FloatHistogram{
 		CounterResetHint: histogram.GaugeType,
 		Count:            17,
@@ -272,6 +353,9 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to a single chunk": {

@@ -279,6 +363,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1Recoded},
+				hSample{t: 2, h: h2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to two chunks": {

@@ -286,6 +374,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h2},
 				hSample{t: 2, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h2},
+				hSample{t: 2, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"histogram and stale sample encoded to two chunks": {

@@ -293,6 +385,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: staleHistogram},
 				hSample{t: 2, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: staleHistogram},
+				hSample{t: 2, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and reduction in bucket encoded to two chunks": {

@@ -300,6 +396,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2down},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1},
+				hSample{t: 2, h: h2down},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		// Float histograms.

@@ -307,6 +407,9 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 			samples: []chunks.Sample{
 				fhSample{t: 1, fh: fh1},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to a single chunk": {

@@ -314,6 +417,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1Recoded},
+				fhSample{t: 2, fh: fh2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two float histograms encoded to two chunks": {

@@ -321,6 +428,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: fh2},
 				fhSample{t: 2, fh: fh1},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh2},
+				fhSample{t: 2, fh: fh1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"float histogram and stale sample encoded to two chunks": {

@@ -328,6 +439,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: staleFloatHistogram},
 				fhSample{t: 2, fh: fh1},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: staleFloatHistogram},
+				fhSample{t: 2, fh: fh1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and reduction in bucket encoded to two chunks": {

@@ -335,6 +450,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: fh1},
 				fhSample{t: 2, fh: fh2down},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1},
+				fhSample{t: 2, fh: fh2down},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		// Mixed.

@@ -343,6 +462,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: fh2},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1},
+				fhSample{t: 2, fh: fh2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"float histogram and histogram encoded to two chunks": {

@@ -350,6 +473,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: fh1},
 				hSample{t: 2, h: h2},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1},
+				hSample{t: 2, h: h2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and stale float histogram encoded to two chunks": {

@@ -357,12 +484,19 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: h1},
 				fhSample{t: 2, fh: staleFloatHistogram},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1},
+				fhSample{t: 2, fh: staleFloatHistogram},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"single gauge histogram encoded to one chunk": {
 			samples: []chunks.Sample{
 				hSample{t: 1, h: gh1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: gh1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two gauge histograms encoded to one chunk when counter increases": {

@@ -370,6 +504,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: gh1},
 				hSample{t: 2, h: gh2},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: gh1Recoded},
+				hSample{t: 2, h: gh2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two gauge histograms encoded to one chunk when counter decreases": {

@@ -377,12 +515,19 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				hSample{t: 1, h: gh2},
 				hSample{t: 2, h: gh1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: gh2},
+				hSample{t: 2, h: gh1Recoded},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"single gauge float histogram encoded to one chunk": {
 			samples: []chunks.Sample{
 				fhSample{t: 1, fh: gfh1},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: gfh1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two float gauge histograms encoded to one chunk when counter increases": {

@@ -390,6 +535,10 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: gfh1},
 				fhSample{t: 2, fh: gfh2},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: gfh1Recoded},
+				fhSample{t: 2, fh: gfh2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
 		"two float gauge histograms encoded to one chunk when counter decreases": {

@@ -397,8 +546,34 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 				fhSample{t: 1, fh: gfh2},
 				fhSample{t: 2, fh: gfh1},
 			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: gfh2},
+				fhSample{t: 2, fh: gfh1Recoded},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType},
 		},
+		"histogram with extra empty bucket followed by histogram encodes to one chunk": {
+			samples: []chunks.Sample{
+				hSample{t: 1, h: h1ExtraBuckets},
+				hSample{t: 2, h: h1},
+			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1ExtraBuckets},
+				hSample{t: 2, h: h1ExtraBuckets}, // Recoded to add the missing buckets.
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
+		},
+		"float histogram with extra empty bucket followed by float histogram encodes to one chunk": {
+			samples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1ExtraBuckets},
+				fhSample{t: 2, fh: fh1},
+			},
+			expectedSamples: []chunks.Sample{
+				fhSample{t: 1, fh: fh1ExtraBuckets},
+				fhSample{t: 2, fh: fh1ExtraBuckets}, // Recoded to add the missing buckets.
+			},
+			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
+		},
 	}

 	for testName, test := range tests {
@@ -431,9 +606,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {

 	// Decode all encoded samples and assert they are equal to the original ones.
 	encodedSamples := chunks.ChunkMetasToSamples(chks)
-	require.Equal(t, len(test.samples), len(encodedSamples))
+	require.Equal(t, len(test.expectedSamples), len(encodedSamples))

-	for i, s := range test.samples {
+	for i, s := range test.expectedSamples {
 		encodedSample := encodedSamples[i]
 		switch expectedSample := s.(type) {
 		case hSample:

@@ -447,7 +622,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 				require.True(t, value.IsStaleNaN(h.Sum), fmt.Sprintf("at idx %d", i))
 				continue
 			}
-			require.Equal(t, *expectedSample.h, *h.Compact(0), fmt.Sprintf("at idx %d", i))
+			require.Equal(t, *expectedSample.h, *h, fmt.Sprintf("at idx %d", i))
 		case fhSample:
 			require.Equal(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram", fmt.Sprintf("at idx %d", i))
 			fh := encodedSample.FH()

@@ -459,7 +634,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 				require.True(t, value.IsStaleNaN(fh.Sum), fmt.Sprintf("at idx %d", i))
 				continue
 			}
-			require.Equal(t, *expectedSample.fh, *fh.Compact(0), fmt.Sprintf("at idx %d", i))
+			require.Equal(t, *expectedSample.fh, *fh, fmt.Sprintf("at idx %d", i))
 		default:
 			t.Error("internal error, unexpected type")
 		}
@@ -219,16 +219,25 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
 }

 // appendable returns whether the chunk can be appended to, and if so whether
-// any recoding needs to happen using the provided inserts (in case of any new
-// buckets, positive or negative range, respectively). If the sample is a gauge
-// histogram, AppendableGauge must be used instead.
+// 1. Any recoding needs to happen to the chunk using the provided forward
+// inserts (in case of any new buckets, positive or negative range,
+// respectively).
+// 2. Any recoding needs to happen for the histogram being appended, using the
+// backward inserts (in case of any missing buckets, positive or negative
+// range, respectively).
+//
+// If the sample is a gauge histogram, AppendableGauge must be used instead.
+//
 // The chunk is not appendable in the following cases:
 //
 // - The schema has changed.
 // - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
-// - Any buckets have disappeared.
-// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
+// - Any buckets have disappeared, unless the bucket count was 0, unused.
+//   Empty bucket can happen if the chunk was recoded and we're merging a non
+//   recoded histogram. In this case backward inserts will be provided.
+// - There was a counter reset in the count of observations or in any bucket,
+//   including the zero bucket.
 // - The last sample in the chunk was stale while the current sample is not stale.
 //
 // The method returns an additional boolean set to true if it is not appendable

@@ -236,6 +245,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
 // append. If counterReset is true, okToAppend is always false.
 func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
 	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
 	okToAppend, counterReset bool,
 ) {
 	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@@ -279,27 +289,214 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
 	}

 	var ok bool
-	positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
+	positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
 	if !ok {
 		counterReset = true
 		return
 	}
-	negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
+	negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
 	if !ok {
 		counterReset = true
 		return
 	}

-	if counterResetInAnyFloatBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
-		counterResetInAnyFloatBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
-		counterReset, positiveInserts, negativeInserts = true, nil, nil
-		return
-	}
-
 	okToAppend = true
 	return
 }

+// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+// The function also returns the inserts to expand 'b' to also cover all the
+// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
+// The function also checks for counter resets between 'a' and 'b'.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+//	span syntax: [offset, length]
+//	spans      : [ 0 , 2 ]               [2,1]                   [ 3 , 2 ]                     [3,1]       [1,1]
+//	bucket idx : [0]   [1]    2     3    [4]    5     6     7    [8]   [9]   10    11    12   [13]   14   [15]
+//	raw values    6     3                 3                       2     4                      5           1
+//	deltas        6    -3                 0                      -1     2                      1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merge - in
+// that order.)
+//
+// [*] unchanged in terms of which bucket indices they represent. but to achieve
+// that, their offset needs to change if "disrupted" by spans changing ahead of
+// them
+//
+//	                                     \/ this one is "unchanged"
+//	spans      : [  0  ,  3    ]         [1,1]       [    1    ,   4     ]                     [  3  ,   3    ]
+//	bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]   10    11    12   [13]  [14]  [15]
+//	raw values    6     3     0           3           0     0     2     4     5                0     1
+//	deltas        6    -3    -3           3          -3     0     2     2     1               -5     1
+//	delta mods:                          / \                     / \                          / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandSpansForward we don't have to worry about the changes to the
+// spans themselves, thanks to the iterators we get to work with the more useful
+// bucket indices (which of course directly correspond to the buckets we have to
+// adjust).
+func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuckets []float64) (forward, backward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
+	var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
+
+	// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var aInter Insert
+	var bInter Insert
+
+	aIdx, aOK := ai.Next()
+	bIdx, bOK := bi.Next()
+
+	// Bucket count. Initialize the absolute count and index into the
+	// positive/negative counts or deltas array. The bucket count is
+	// used to detect counter reset as well as unused buckets in a.
+	var (
+		aCount    float64
+		bCount    float64
+		aCountIdx int
+		bCountIdx int
+	)
+	if aOK {
+		aCount = aBuckets[aCountIdx].value
+	}
+	if bOK {
+		bCount = bBuckets[bCountIdx]
+	}
+
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case aIdx == bIdx: // Both have an identical bucket index.
+				// Bucket count. Check bucket for reset from a to b.
+				if aCount > bCount {
+					return nil, nil, false
+				}
+
+				// Finish WIP insert for a and reset.
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+
+				// Finish WIP insert for b and reset.
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+
+				aIdx, aOK = ai.Next()
+				bIdx, bOK = bi.Next()
+				aInter.pos++ // Advance potential insert position.
+				aCountIdx++  // Advance absolute bucket count index for a.
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				bInter.pos++ // Advance potential insert position.
+				bCountIdx++  // Advance absolute bucket count index for b.
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+
+				continue
+			case aIdx < bIdx: // b misses a bucket index that is in a.
+				// This is ok if the count in a is 0, in which case we make a note to
+				// fill in the bucket in b and advance a.
+				if aCount == 0 {
+					bInter.num++ // Mark that we need to insert a bucket in b.
+					// Advance a
+					if aInter.num > 0 {
+						aInserts = append(aInserts, aInter)
+						aInter.num = 0
+					}
+					aIdx, aOK = ai.Next()
+					aInter.pos++
+					aCountIdx++
+					if aOK {
+						aCount = aBuckets[aCountIdx].value
+					}
+					continue
+				}
+				// Otherwise we are missing a bucket that was in use in a, which is a reset.
+				return nil, nil, false
+			case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
+				aInter.num++
+				// Advance b
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				bIdx, bOK = bi.Next()
+				bInter.pos++
+				bCountIdx++
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			// This is ok if the count in a is 0, in which case we make a note to
+			// fill in the bucket in b and advance a.
+			if aCount == 0 {
+				bInter.num++
+				// Advance a
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+				aIdx, aOK = ai.Next()
+				aInter.pos++ // Advance potential insert position.
+				// Update absolute bucket counts for a.
+				aCountIdx++
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				continue
+			}
+			// Otherwise we are missing a bucket that was in use in a, which is a reset.
+			return nil, nil, false
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			aInter.num++
+			// Advance b
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+				bInter.num = 0
+			}
+			bIdx, bOK = bi.Next()
+			bInter.pos++ // Advance potential insert position.
+			// Update absolute bucket counts for b.
+			bCountIdx++
+			if bOK {
+				bCount = bBuckets[bCountIdx]
+			}
+		default: // Both iterators ran out. We're done.
+			if aInter.num > 0 {
+				aInserts = append(aInserts, aInter)
+			}
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+			}
+			break loop
+		}
+	}
+
+	return aInserts, bInserts, true
+}
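To make the two insert directions concrete: forward inserts widen the chunk's existing samples to accommodate new buckets in the incoming histogram, while backward inserts widen the incoming histogram to re-add buckets that exist (empty) in the chunk. The following self-contained sketch shows what applying an insert means; plain float64 slices stand in for the chunk's bucket representations, and the local insert type only mirrors the shape of the package's unexported Insert:

package main

import "fmt"

// insert records that num empty buckets must be spliced in at position pos.
type insert struct{ pos, num int }

// applyInserts splices zeros into vals as directed, mimicking how recoding
// expands bucket values to a wider span layout.
func applyInserts(vals []float64, ins []insert) []float64 {
	out := make([]float64, 0, len(vals))
	i := 0
	for _, in := range ins {
		out = append(out, vals[i:in.pos]...)
		i = in.pos
		for n := 0; n < in.num; n++ {
			out = append(out, 0)
		}
	}
	return append(out, vals[i:]...)
}

func main() {
	// The chunk has three buckets with the middle one empty; the incoming
	// histogram carries only the two non-empty ones. One backward insert at
	// position 1 expands it to the chunk's layout.
	incoming := []float64{7, 4}
	fmt.Println(applyInserts(incoming, []insert{{pos: 1, num: 1}})) // [7 0 4]
}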
 // appendableGauge returns whether the chunk can be appended to, and if so
 // whether:
 // 1. Any recoding needs to happen to the chunk using the provided inserts

@@ -349,76 +546,6 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) (
 	return
 }

-// counterResetInAnyFloatBucket returns true if there was a counter reset for any
-// bucket. This should be called only when the bucket layout is the same or new
-// buckets were added. It does not handle the case of buckets missing.
-func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, oldSpans, newSpans []histogram.Span) bool {
-	if len(oldSpans) == 0 || len(oldBuckets) == 0 {
-		return false
-	}
-
-	var (
-		oldSpanSliceIdx, newSpanSliceIdx     int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found.
-		oldInsideSpanIdx, newInsideSpanIdx   uint32       // Index inside a span.
-		oldIdx, newIdx                       int32        // Index inside a bucket slice.
-		oldBucketSliceIdx, newBucketSliceIdx int          // Index inside bucket slice.
-	)
-
-	// Find first non empty spans.
-	oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
-	newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
-	oldVal, newVal := oldBuckets[0].value, newBuckets[0]
-
-	// Since we assume that new spans won't have missing buckets, there will never be a case
-	// where the old index will not find a matching new index.
-	for {
-		if oldIdx == newIdx {
-			if newVal < oldVal {
-				return true
-			}
-		}
-
-		if oldIdx <= newIdx {
-			// Moving ahead old bucket and span by 1 index.
-			if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
-				// Current span is over.
-				oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
-				oldInsideSpanIdx = 0
-				if oldSpanSliceIdx >= len(oldSpans) {
-					// All old spans are over.
-					break
-				}
-			} else {
-				oldInsideSpanIdx++
-				oldIdx++
-			}
-			oldBucketSliceIdx++
-			oldVal = oldBuckets[oldBucketSliceIdx].value
-		}
-
-		if oldIdx > newIdx {
-			// Moving ahead new bucket and span by 1 index.
-			if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
-				// Current span is over.
-				newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
-				newInsideSpanIdx = 0
-				if newSpanSliceIdx >= len(newSpans) {
-					// All new spans are over.
-					// This should not happen, old spans above should catch this first.
-					panic("new spans over before old spans in counterReset")
-				}
-			} else {
-				newInsideSpanIdx++
-				newIdx++
-			}
-			newBucketSliceIdx++
-			newVal = newBuckets[newBucketSliceIdx]
-		}
-	}
-
-	return false
-}
-
 // appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that
 // the histogram is properly structured, e.g. the number of buckets used
 // corresponds to the number conveyed by the span structures. First call
@@ -614,7 +741,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
 		a.setCounterResetHeader(CounterReset)
 	case prev != nil:
 		// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
-		_, _, _, counterReset := prev.appendable(h)
+		_, _, _, _, _, counterReset := prev.appendable(h)
 		if counterReset {
 			a.setCounterResetHeader(CounterReset)
 		} else {

@@ -626,7 +753,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend

 	// Adding counter-like histogram.
 	if h.CounterResetHint != histogram.GaugeType {
-		pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
+		pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
 		if !okToAppend || counterReset {
 			if appendOnly {
 				if counterReset {

@@ -657,6 +784,13 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
 			app.(*FloatHistogramAppender).appendFloatHistogram(t, h)
 			return chk, true, app, nil
 		}
+		if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
+			// The histogram needs to be expanded to have the extra empty buckets
+			// of the chunk.
+			h.PositiveSpans = a.pSpans
+			h.NegativeSpans = a.nSpans
+			a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+		}
 		a.appendFloatHistogram(t, h)
 		return nil, false, a, nil
 	}
@@ -245,9 +245,11 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) {
 	h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
 	// This is how span changes will be handled.
 	hApp, _ := app.(*FloatHistogramAppender)
-	posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat(nil))
+	posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2.ToFloat(nil))
 	require.NotEmpty(t, posInterjections)
 	require.NotEmpty(t, negInterjections)
+	require.Empty(t, backwardPositiveInserts)
+	require.Empty(t, backwardNegativeInserts)
 	require.True(t, ok) // Only new buckets came in.
 	require.False(t, cr)
 	c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)

@@ -333,7 +335,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Schema++
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.False(t, ok)

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)

@@ -343,7 +345,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.ZeroThreshold += 0.1
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.False(t, ok)

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)

@@ -363,9 +365,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2.Sum = 30
 		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1}

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.NotEmpty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.True(t, ok) // Only new buckets came in.
 		require.False(t, cr)

@@ -385,24 +389,56 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2.Sum = 21
 		h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1}

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.Empty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.False(t, ok) // Need to cut a new chunk.
 		require.True(t, cr)

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
 	}

+	{ // New histogram that has buckets missing but the buckets missing were empty.
+		emptyBucketH := eh.Copy()
+		emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1}
+		c, hApp, ts, h1 := setup(emptyBucketH)
+		h2 := h1.Copy()
+		h2.PositiveSpans = []histogram.Span{
+			{Offset: 0, Length: 1},
+			{Offset: 3, Length: 1},
+			{Offset: 3, Length: 2},
+			{Offset: 5, Length: 1},
+		}
+		h2.PositiveBuckets = []float64{7, 4, 3, 5, 2}
+
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
+		require.Empty(t, posInterjections)
+		require.Empty(t, negInterjections)
+		require.NotEmpty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
+		require.True(t, ok)
+		require.False(t, cr)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+
+		// Check that h2 was recoded.
+		require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets)
+		require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
+	}
+
 	{ // New histogram that has a counter reset while buckets are same.
 		c, hApp, ts, h1 := setup(eh)
 		h2 := h1.Copy()
 		h2.Sum = 23
 		h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1}

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.Empty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.False(t, ok) // Need to cut a new chunk.
 		require.True(t, cr)

@@ -421,9 +457,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2.Sum = 29
 		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0}

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.Empty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.False(t, ok) // Need to cut a new chunk.
 		require.True(t, cr)

@@ -448,9 +486,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2.Sum = 26
 		h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1}

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.Empty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.False(t, ok) // Need to cut a new chunk.
 		require.True(t, cr)

@@ -524,10 +564,44 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
 	}

+	{
+		// Start a new chunk with a histogram that has an empty bucket.
+		// Add a histogram that has the same bucket missing.
+		// This should be appendable and can happen if we are merging from chunks
+		// where the first sample came from a recoded chunk that added the
+		// empty bucket.
+		h1 := eh.Copy()
+		// Add a bucket that is empty -10 offsets from the first bucket.
+		h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
+		h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
+		h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
+		for i, v := range eh.PositiveSpans[1:] {
+			h1.PositiveSpans[i+2] = v
+		}
+		h1.PositiveBuckets = make([]float64, len(eh.PositiveBuckets)+1)
+		h1.PositiveBuckets[0] = 0
+		for i, v := range eh.PositiveBuckets {
+			h1.PositiveBuckets[i+1] = v
+		}
+
+		c, hApp, ts, _ := setup(h1)
+		h2 := eh.Copy()
+
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
+		require.Empty(t, posInterjections)
+		require.Empty(t, negInterjections)
+		require.NotEmpty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
+		require.True(t, ok)
+		require.False(t, cr)
+
+		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
+	}
+
 	{ // Custom buckets, no change.
 		c, hApp, ts, h1 := setup(cbh)
 		h2 := h1.Copy()
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.True(t, ok)

 		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)

@@ -538,7 +612,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2 := h1.Copy()
 		h2.Count++
 		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2}
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.True(t, ok)

 		assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)

@@ -549,7 +623,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		h2 := h1.Copy()
 		h2.Count--
 		h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0}
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.False(t, ok)

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)

@@ -559,7 +633,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		c, hApp, ts, h1 := setup(cbh)
 		h2 := h1.Copy()
 		h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
-		_, _, ok, _ := hApp.appendable(h2)
+		_, _, _, _, ok, _ := hApp.appendable(h2)
 		require.False(t, ok)

 		assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)

@@ -581,9 +655,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) {
 		// so the new histogram should have new counts >= these per-bucket counts, e.g.:
 		h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30)

-		posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+		posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
 		require.NotEmpty(t, posInterjections)
 		require.Empty(t, negInterjections)
+		require.Empty(t, backwardPositiveInserts)
+		require.Empty(t, backwardNegativeInserts)
 		require.True(t, ok) // Only new buckets came in.
 		require.False(t, cr)

@@ -839,9 +915,11 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) {
 			require.Equal(t, 1, c.NumSamples())
 			hApp, _ := app.(*FloatHistogramAppender)

-			pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2)
+			pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2)
 			require.Empty(t, pI)
 			require.Empty(t, nI)
+			require.Empty(t, bpI)
+			require.Empty(t, bnI)
 			require.True(t, okToAppend)
 			require.False(t, counterReset)
 		})
@ -237,16 +237,23 @@ func (a *HistogramAppender) Append(int64, float64) {
|
|||
}
|
||||
|
||||
// appendable returns whether the chunk can be appended to, and if so whether
|
||||
// any recoding needs to happen using the provided inserts (in case of any new
|
||||
// buckets, positive or negative range, respectively). If the sample is a gauge
|
||||
// histogram, AppendableGauge must be used instead.
|
||||
// 1. Any recoding needs to happen to the chunk using the provided forward
|
||||
// inserts (in case of any new buckets, positive or negative range,
|
||||
// respectively).
|
||||
// 2. Any recoding needs to happen for the histogram being appended, using the
|
||||
// backward inserts (in case of any missing buckets, positive or negative
|
||||
// range, respectively).
|
||||
//
|
||||
// If the sample is a gauge histogram, AppendableGauge must be used instead.
|
||||
//
|
||||
// The chunk is not appendable in the following cases:
|
||||
//
|
||||
// - The schema has changed.
|
||||
// - The custom bounds have changed if the current schema is custom buckets.
|
||||
// - The threshold for the zero bucket has changed.
|
||||
// - Any buckets have disappeared.
|
||||
// - Any buckets have disappeared, unless the bucket count was 0 (unused).
|
||||
// Empty buckets can happen if the chunk was recoded and we're merging a
|
||||
// non-recoded histogram. In this case backward inserts will be provided.
|
||||
// - There was a counter reset in the count of observations or in any bucket,
|
||||
// including the zero bucket.
|
||||
// - The last sample in the chunk was stale while the current sample is not stale.
|
||||
|
@ -256,6 +263,7 @@ func (a *HistogramAppender) Append(int64, float64) {
|
|||
// append. If counterReset is true, okToAppend is always false.
|
||||
func (a *HistogramAppender) appendable(h *histogram.Histogram) (
|
||||
positiveInserts, negativeInserts []Insert,
|
||||
backwardPositiveInserts, backwardNegativeInserts []Insert,
|
||||
okToAppend, counterReset bool,
|
||||
) {
|
||||
if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
|
||||
|
@ -299,31 +307,219 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
|
|||
}
|
||||
|
||||
var ok bool
|
||||
positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
|
||||
positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
|
||||
if !ok {
|
||||
counterReset = true
|
||||
return
|
||||
}
|
||||
negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
|
||||
negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
|
||||
if !ok {
|
||||
counterReset = true
|
||||
return
|
||||
}
|
||||
|
||||
if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
|
||||
counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
|
||||
counterReset, positiveInserts, negativeInserts = true, nil, nil
|
||||
return
|
||||
}
|
||||
|
||||
okToAppend = true
|
||||
return
|
||||
}
|
||||
|
||||
// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
|
||||
// they match the spans in 'b'. 'b' must cover the same or more buckets than
|
||||
// 'a', otherwise the function will return false.
|
||||
// The function also returns the inserts needed to expand 'b' so that it also
|
||||
// covers all the buckets that are missing in 'b' but present with a 0 counter value in 'a'.
|
||||
// The function also checks for counter resets between 'a' and 'b'.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Let's say the old buckets look like this:
|
||||
//
|
||||
// span syntax: [offset, length]
|
||||
// spans : [ 0 , 2 ] [2,1] [ 3 , 2 ] [3,1] [1,1]
|
||||
// bucket idx : [0] [1] 2 3 [4] 5 6 7 [8] [9] 10 11 12 [13] 14 [15]
|
||||
// raw values 6 3 3 2 4 5 1
|
||||
// deltas 6 -3 0 -1 2 1 -4
|
||||
//
|
||||
// But now we introduce a new bucket layout. (Carefully chosen example where we
|
||||
// have a span appended, one unchanged[*], one prepended, and two merged - in
|
||||
// that order.)
|
||||
//
|
||||
// [*] unchanged in terms of which bucket indices they represent, but to achieve
|
||||
// that, their offset needs to change if "disrupted" by spans changing ahead of
|
||||
// them.
|
||||
//
|
||||
// \/ this one is "unchanged"
|
||||
// spans : [ 0 , 3 ] [1,1] [ 1 , 4 ] [ 3 , 3 ]
|
||||
// bucket idx : [0] [1] [2] 3 [4] 5 [6] [7] [8] [9] 10 11 12 [13] [14] [15]
|
||||
// raw values 6 3 0 3 0 0 2 4 5 0 1
|
||||
// deltas 6 -3 -3 3 -3 0 2 2 1 -5 1
|
||||
// delta mods: / \ / \ / \
|
||||
//
|
||||
// Note for histograms with delta-encoded buckets: Whenever any new buckets are
|
||||
// introduced, the subsequent "old" bucket needs to readjust its delta to the
|
||||
// new base of 0. Thus, for the caller who wants to transform the set of
|
||||
// original deltas to a new set of deltas to match a new span layout that adds
|
||||
// buckets, we simply need to generate a list of inserts.
|
||||
//
|
||||
// Note: Within expandIntSpansAndBuckets we don't have to worry about the changes to the
|
||||
// spans themselves, thanks to the iterators we get to work with the more useful
|
||||
// bucket indices (which of course directly correspond to the buckets we have to
|
||||
// adjust).
|
||||
func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) (forward, backward []Insert, ok bool) {
|
||||
ai := newBucketIterator(a)
|
||||
bi := newBucketIterator(b)
|
||||
|
||||
var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
|
||||
var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
|
||||
|
||||
// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
|
||||
// be yielded when we finish a streak of new buckets.
|
||||
var aInter Insert
|
||||
var bInter Insert
|
||||
|
||||
aIdx, aOK := ai.Next()
|
||||
bIdx, bOK := bi.Next()
|
||||
|
||||
// Bucket count. Initialize the absolute count and index into the
|
||||
// positive/negative counts or deltas array. The bucket count is
|
||||
// used to detect counter reset as well as unused buckets in a.
|
||||
var (
|
||||
aCount int64
|
||||
bCount int64
|
||||
aCountIdx int
|
||||
bCountIdx int
|
||||
)
|
||||
if aOK {
|
||||
aCount = aBuckets[aCountIdx]
|
||||
}
|
||||
if bOK {
|
||||
bCount = bBuckets[bCountIdx]
|
||||
}
|
||||
|
||||
loop:
|
||||
for {
|
||||
switch {
|
||||
case aOK && bOK:
|
||||
switch {
|
||||
case aIdx == bIdx: // Both have an identical bucket index.
|
||||
// Bucket count. Check bucket for reset from a to b.
|
||||
if aCount > bCount {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Finish WIP insert for a and reset.
|
||||
if aInter.num > 0 {
|
||||
aInserts = append(aInserts, aInter)
|
||||
aInter.num = 0
|
||||
}
|
||||
|
||||
// Finish WIP insert for b and reset.
|
||||
if bInter.num > 0 {
|
||||
bInserts = append(bInserts, bInter)
|
||||
bInter.num = 0
|
||||
}
|
||||
|
||||
aIdx, aOK = ai.Next()
|
||||
bIdx, bOK = bi.Next()
|
||||
aInter.pos++ // Advance potential insert position.
|
||||
aCountIdx++ // Advance absolute bucket count index for a.
|
||||
if aOK {
|
||||
aCount += aBuckets[aCountIdx]
|
||||
}
|
||||
bInter.pos++ // Advance potential insert position.
|
||||
bCountIdx++ // Advance absolute bucket count index for b.
|
||||
if bOK {
|
||||
bCount += bBuckets[bCountIdx]
|
||||
}
|
||||
|
||||
continue
|
||||
case aIdx < bIdx: // b misses a bucket index that is in a.
|
||||
// This is ok if the count in a is 0, in which case we make a note to
|
||||
// fill in the bucket in b and advance a.
|
||||
if aCount == 0 {
|
||||
bInter.num++ // Mark that we need to insert a bucket in b.
|
||||
// Advance a
|
||||
if aInter.num > 0 {
|
||||
aInserts = append(aInserts, aInter)
|
||||
aInter.num = 0
|
||||
}
|
||||
aIdx, aOK = ai.Next()
|
||||
aInter.pos++
|
||||
aCountIdx++
|
||||
if aOK {
|
||||
aCount += aBuckets[aCountIdx]
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Otherwise we are missing a bucket that was in use in a, which is a reset.
|
||||
return nil, nil, false
|
||||
case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
|
||||
aInter.num++
|
||||
// Advance b
|
||||
if bInter.num > 0 {
|
||||
bInserts = append(bInserts, bInter)
|
||||
bInter.num = 0
|
||||
}
|
||||
bIdx, bOK = bi.Next()
|
||||
bInter.pos++
|
||||
bCountIdx++
|
||||
if bOK {
|
||||
bCount += bBuckets[bCountIdx]
|
||||
}
|
||||
}
|
||||
case aOK && !bOK: // b misses a value that is in a.
|
||||
// This is ok if the count in a is 0, in which case we make a note to
|
||||
// fill in the bucket in b and advance a.
|
||||
if aCount == 0 {
|
||||
bInter.num++
|
||||
// Advance a
|
||||
if aInter.num > 0 {
|
||||
aInserts = append(aInserts, aInter)
|
||||
aInter.num = 0
|
||||
}
|
||||
aIdx, aOK = ai.Next()
|
||||
aInter.pos++ // Advance potential insert position.
|
||||
// Update absolute bucket counts for a.
|
||||
aCountIdx++
|
||||
if aOK {
|
||||
aCount += aBuckets[aCountIdx]
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Otherwise we are missing a bucket that was in use in a, which is a reset.
|
||||
return nil, nil, false
|
||||
case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
|
||||
aInter.num++
|
||||
// Advance b
|
||||
if bInter.num > 0 {
|
||||
bInserts = append(bInserts, bInter)
|
||||
bInter.num = 0
|
||||
}
|
||||
bIdx, bOK = bi.Next()
|
||||
bInter.pos++ // Advance potential insert position.
|
||||
// Update absolute bucket counts for b.
|
||||
bCountIdx++
|
||||
if bOK {
|
||||
bCount += bBuckets[bCountIdx]
|
||||
}
|
||||
default: // Both iterators ran out. We're done.
|
||||
if aInter.num > 0 {
|
||||
aInserts = append(aInserts, aInter)
|
||||
}
|
||||
if bInter.num > 0 {
|
||||
bInserts = append(bInserts, bInter)
|
||||
}
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
return aInserts, bInserts, true
|
||||
}
|
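To make the insert bookkeeping concrete, here is a minimal, runnable sketch (the insert type and applyInserts helper are hypothetical stand-ins, not part of this package) that reproduces the delta transformation from the comment above: decode the deltas to absolute counts, splice in zero buckets at the insert positions, and re-encode.

package main

import "fmt"

// insert mirrors the package's Insert: num zero buckets go in before position
// pos of the original bucket array.
type insert struct{ pos, num int }

// applyInserts decodes delta-encoded buckets to absolute counts, splices in
// zero-valued buckets, and re-encodes the deltas.
func applyInserts(deltas []int64, inserts []insert) []int64 {
	counts := make([]int64, 0, len(deltas))
	var cur int64
	for _, d := range deltas {
		cur += d
		counts = append(counts, cur)
	}
	// Apply back to front so earlier positions stay valid.
	for i := len(inserts) - 1; i >= 0; i-- {
		ins := inserts[i]
		tail := append(make([]int64, ins.num), counts[ins.pos:]...)
		counts = append(counts[:ins.pos], tail...)
	}
	out := make([]int64, len(counts))
	var prev int64
	for i, c := range counts {
		out[i] = c - prev
		prev = c
	}
	return out
}

func main() {
	// Deltas for the counts 6 3 3 2 4 5 1 from the example above.
	deltas := []int64{6, -3, 0, -1, 2, 1, -4}
	// One new bucket before old position 2, two before 3, one before 6.
	fmt.Println(applyInserts(deltas, []insert{{2, 1}, {3, 2}, {6, 1}}))
	// Prints [6 -3 -3 3 -3 0 2 2 1 -5 1], matching the new deltas above.
}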
||||
|
||||
// appendableGauge returns whether the chunk can be appended to, and if so
|
||||
// whether:
|
||||
// 1. Any recoding needs to happen to the chunk using the provided inserts
|
||||
// (in case of any new buckets, positive or negative range, respectively).
|
||||
// 1. Any recoding needs to happen to the chunk using the provided forward
|
||||
// inserts (in case of any new buckets, positive or negative range,
|
||||
// respectively).
|
||||
// 2. Any recoding needs to happen for the histogram being appended, using the
|
||||
// backward inserts (in case of any missing buckets, positive or negative
|
||||
// range, respectively).
|
||||
|
@ -369,76 +565,6 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) (
|
|||
return
|
||||
}
|
||||
|
||||
// counterResetInAnyBucket returns true if there was a counter reset for any
|
||||
// bucket. This should be called only when the bucket layout is the same or new
|
||||
// buckets were added. It does not handle the case of buckets missing.
|
||||
func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool {
|
||||
if len(oldSpans) == 0 || len(oldBuckets) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
oldSpanSliceIdx, newSpanSliceIdx int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found.
|
||||
oldInsideSpanIdx, newInsideSpanIdx uint32 // Index inside a span.
|
||||
oldIdx, newIdx int32 // Index inside a bucket slice.
|
||||
oldBucketSliceIdx, newBucketSliceIdx int // Index inside bucket slice.
|
||||
)
|
||||
|
||||
// Find first non empty spans.
|
||||
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
|
||||
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
|
||||
oldVal, newVal := oldBuckets[0], newBuckets[0]
|
||||
|
||||
// Since we assume that new spans won't have missing buckets, there will never be a case
|
||||
// where the old index will not find a matching new index.
|
||||
for {
|
||||
if oldIdx == newIdx {
|
||||
if newVal < oldVal {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if oldIdx <= newIdx {
|
||||
// Moving ahead old bucket and span by 1 index.
|
||||
if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length {
|
||||
// Current span is over.
|
||||
oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans)
|
||||
oldInsideSpanIdx = 0
|
||||
if oldSpanSliceIdx >= len(oldSpans) {
|
||||
// All old spans are over.
|
||||
break
|
||||
}
|
||||
} else {
|
||||
oldInsideSpanIdx++
|
||||
oldIdx++
|
||||
}
|
||||
oldBucketSliceIdx++
|
||||
oldVal += oldBuckets[oldBucketSliceIdx]
|
||||
}
|
||||
|
||||
if oldIdx > newIdx {
|
||||
// Moving ahead new bucket and span by 1 index.
|
||||
if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length {
|
||||
// Current span is over.
|
||||
newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans)
|
||||
newInsideSpanIdx = 0
|
||||
if newSpanSliceIdx >= len(newSpans) {
|
||||
// All new spans are over.
|
||||
// This should not happen, old spans above should catch this first.
|
||||
panic("new spans over before old spans in counterReset")
|
||||
}
|
||||
} else {
|
||||
newInsideSpanIdx++
|
||||
newIdx++
|
||||
}
|
||||
newBucketSliceIdx++
|
||||
newVal += newBuckets[newBucketSliceIdx]
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
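counterResetInAnyBucket is removed by this change; its job moves into the inline aCount > bCount check in expandIntSpansAndBuckets above. A minimal sketch of the underlying idea, with hypothetical helper names, for two histograms with identical bucket layouts:

package main

import "fmt"

// resetInBuckets reports a counter reset: it happened if any absolute bucket
// count decreased between the old and the new delta-encoded bucket arrays.
func resetInBuckets(oldDeltas, newDeltas []int64) bool {
	var oldCur, newCur int64
	for i := range oldDeltas {
		oldCur += oldDeltas[i]
		newCur += newDeltas[i]
		if newCur < oldCur {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(resetInBuckets([]int64{6, -3, 0}, []int64{6, -3, 0})) // false: identical counts
	fmt.Println(resetInBuckets([]int64{6, -3, 0}, []int64{6, -4, 1})) // true: bucket 1 dropped from 3 to 2
}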
||||
|
||||
// appendHistogram appends a histogram to the chunk. The caller must ensure that
|
||||
// the histogram is properly structured, e.g. the number of buckets used
|
||||
// corresponds to the number conveyed by the span structures. First call
|
||||
|
@ -649,7 +775,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
|
|||
a.setCounterResetHeader(CounterReset)
|
||||
case prev != nil:
|
||||
// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
|
||||
_, _, _, counterReset := prev.appendable(h)
|
||||
_, _, _, _, _, counterReset := prev.appendable(h)
|
||||
if counterReset {
|
||||
a.setCounterResetHeader(CounterReset)
|
||||
} else {
|
||||
|
@ -661,7 +787,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
|
|||
|
||||
// Adding counter-like histogram.
|
||||
if h.CounterResetHint != histogram.GaugeType {
|
||||
pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
|
||||
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
|
||||
if !okToAppend || counterReset {
|
||||
if appendOnly {
|
||||
if counterReset {
|
||||
|
@ -692,6 +818,13 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
|
|||
app.(*HistogramAppender).appendHistogram(t, h)
|
||||
return chk, true, app, nil
|
||||
}
|
||||
if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
|
||||
// The histogram needs to be expanded to have the extra empty buckets
|
||||
// of the chunk.
|
||||
h.PositiveSpans = a.pSpans
|
||||
h.NegativeSpans = a.nSpans
|
||||
a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
|
||||
}
|
||||
a.appendHistogram(t, h)
|
||||
return nil, false, a, nil
|
||||
}
|
||||
|
|
|
@ -280,6 +280,9 @@ type Insert struct {
|
|||
num int
|
||||
}
|
||||
|
||||
// Deprecated: use expandIntSpansAndBuckets or
|
||||
// expandFloatSpansAndBuckets instead.
|
||||
// expandSpansForward is left here for reference.
|
||||
// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
|
||||
// they match the spans in 'b'. 'b' must cover the same or more buckets than
|
||||
// 'a', otherwise the function will return false.
|
||||
|
@ -574,15 +577,3 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
|
|||
return histogram.UnknownCounterReset
|
||||
}
|
||||
}
|
||||
|
||||
// Handle pathological case of empty span when advancing span idx.
|
||||
// Call it with idx==-1 to find the first non empty span.
|
||||
func nextNonEmptySpanSliceIdx(idx int, bucketIdx int32, spans []histogram.Span) (newIdx int, newBucketIdx int32) {
|
||||
for idx++; idx < len(spans); idx++ {
|
||||
if spans[idx].Length > 0 {
|
||||
return idx, bucketIdx + spans[idx].Offset + 1
|
||||
}
|
||||
bucketIdx += spans[idx].Offset
|
||||
}
|
||||
return idx, 0
|
||||
}
|
||||
|
|
|
@ -256,9 +256,11 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
|
|||
h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
|
||||
// This is how span changes will be handled.
|
||||
hApp, _ := app.(*HistogramAppender)
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.NotEmpty(t, posInterjections)
|
||||
require.NotEmpty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.True(t, ok) // Only new buckets came in.
|
||||
require.False(t, cr)
|
||||
c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans)
|
||||
|
@ -347,7 +349,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
c, hApp, ts, h1 := setup(eh)
|
||||
h2 := h1.Copy()
|
||||
h2.Schema++
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.False(t, ok)
|
||||
|
||||
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
|
@ -357,7 +359,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
c, hApp, ts, h1 := setup(eh)
|
||||
h2 := h1.Copy()
|
||||
h2.ZeroThreshold += 0.1
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.False(t, ok)
|
||||
|
||||
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
|
@ -380,9 +382,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
|
||||
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.NotEmpty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.True(t, ok) // Only new buckets came in.
|
||||
require.False(t, cr)
|
||||
|
||||
|
@ -401,24 +405,57 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
h2.Sum = 21
|
||||
h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.False(t, ok) // Need to cut a new chunk.
|
||||
require.True(t, cr)
|
||||
|
||||
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
|
||||
}
|
||||
|
||||
{ // New histogram that has buckets missing, but the missing buckets were empty.
|
||||
emptyBucketH := eh.Copy()
|
||||
emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12)
|
||||
c, hApp, ts, h1 := setup(emptyBucketH)
|
||||
h2 := h1.Copy()
|
||||
h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9.
|
||||
{Offset: 0, Length: 1},
|
||||
{Offset: 3, Length: 1},
|
||||
{Offset: 3, Length: 1},
|
||||
{Offset: 4, Length: 1},
|
||||
{Offset: 1, Length: 1},
|
||||
}
|
||||
h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 19)
|
||||
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.NotEmpty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.True(t, ok)
|
||||
require.False(t, cr)
|
||||
|
||||
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
|
||||
// Check that h2 was recoded.
|
||||
require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3, 0, 3, 4 (total 19)
|
||||
require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans)
|
||||
}
|
||||
|
||||
{ // New histogram that has a counter reset while buckets are same.
|
||||
c, hApp, ts, h1 := setup(eh)
|
||||
h2 := h1.Copy()
|
||||
h2.Sum = 23
|
||||
h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.False(t, ok) // Need to cut a new chunk.
|
||||
require.True(t, cr)
|
||||
|
||||
|
@ -440,9 +477,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
|
||||
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.False(t, ok) // Need to cut a new chunk.
|
||||
require.True(t, cr)
|
||||
|
||||
|
@ -470,9 +509,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
|
||||
h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.False(t, ok) // Need to cut a new chunk.
|
||||
require.True(t, cr)
|
||||
|
||||
|
@ -549,10 +590,44 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader())
|
||||
}
|
||||
|
||||
{
|
||||
// Start a new chunk with a histogram that has an empty bucket.
|
||||
// Add a histogram that has the same bucket missing.
|
||||
// This should be appendable and can happen if we are merging from chunks
|
||||
// where the first sample came from a recoded chunk that added the
|
||||
// empty bucket.
|
||||
h1 := eh.Copy()
|
||||
// Add an empty bucket 10 offsets before the first bucket.
|
||||
h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1)
|
||||
h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1}
|
||||
h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length}
|
||||
for i, v := range eh.PositiveSpans[1:] {
|
||||
h1.PositiveSpans[i+2] = v
|
||||
}
|
||||
h1.PositiveBuckets = make([]int64, len(eh.PositiveBuckets)+1)
|
||||
h1.PositiveBuckets[0] = 0
|
||||
for i, v := range eh.PositiveBuckets {
|
||||
h1.PositiveBuckets[i+1] = v
|
||||
}
|
||||
|
||||
c, hApp, ts, _ := setup(h1)
|
||||
h2 := eh.Copy()
|
||||
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.Empty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.NotEmpty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.True(t, ok)
|
||||
require.False(t, cr)
|
||||
|
||||
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
}
|
||||
|
||||
{ // Custom buckets, no change.
|
||||
c, hApp, ts, h1 := setup(cbh)
|
||||
h2 := h1.Copy()
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.True(t, ok)
|
||||
|
||||
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
|
@ -563,7 +638,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
h2 := h1.Copy()
|
||||
h2.Count++
|
||||
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3}
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.True(t, ok)
|
||||
|
||||
assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset)
|
||||
|
@ -574,7 +649,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
h2 := h1.Copy()
|
||||
h2.Count--
|
||||
h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5}
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.False(t, ok)
|
||||
|
||||
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
|
||||
|
@ -584,7 +659,7 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
c, hApp, ts, h1 := setup(cbh)
|
||||
h2 := h1.Copy()
|
||||
h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}
|
||||
_, _, ok, _ := hApp.appendable(h2)
|
||||
_, _, _, _, ok, _ := hApp.appendable(h2)
|
||||
require.False(t, ok)
|
||||
|
||||
assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset)
|
||||
|
@ -606,9 +681,11 @@ func TestHistogramChunkAppendable(t *testing.T) {
|
|||
// so the new histogram should have new counts >= these per-bucket counts, e.g.:
|
||||
h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30)
|
||||
|
||||
posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
|
||||
posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
|
||||
require.NotEmpty(t, posInterjections)
|
||||
require.Empty(t, negInterjections)
|
||||
require.Empty(t, backwardPositiveInserts)
|
||||
require.Empty(t, backwardNegativeInserts)
|
||||
require.True(t, ok) // Only new buckets came in.
|
||||
require.False(t, cr)
|
||||
|
||||
|
@ -875,9 +952,11 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) {
|
|||
require.Equal(t, 1, c.NumSamples())
|
||||
hApp, _ := app.(*HistogramAppender)
|
||||
|
||||
pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2)
|
||||
pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2)
|
||||
require.Empty(t, pI)
|
||||
require.Empty(t, nI)
|
||||
require.Empty(t, bpI)
|
||||
require.Empty(t, bnI)
|
||||
require.True(t, okToAppend)
|
||||
require.False(t, counterReset)
|
||||
})
|
||||
|
@ -1368,3 +1447,50 @@ func TestHistogramAppendOnlyErrors(t *testing.T) {
|
|||
require.EqualError(t, err, "histogram counter reset")
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkAppendable(b *testing.B) {
|
||||
// Create a histogram with a bunch of spans and buckets.
|
||||
const (
|
||||
numSpans = 1000
|
||||
spanLength = 10
|
||||
)
|
||||
h := &histogram.Histogram{
|
||||
Schema: 0,
|
||||
Count: 100,
|
||||
Sum: 1000,
|
||||
ZeroThreshold: 0.001,
|
||||
ZeroCount: 5,
|
||||
}
|
||||
for i := 0; i < numSpans; i++ {
|
||||
h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: 5, Length: spanLength})
|
||||
h.NegativeSpans = append(h.NegativeSpans, histogram.Span{Offset: 5, Length: spanLength})
|
||||
for j := 0; j < spanLength; j++ {
|
||||
h.PositiveBuckets = append(h.PositiveBuckets, int64(j))
|
||||
h.NegativeBuckets = append(h.NegativeBuckets, int64(j))
|
||||
}
|
||||
}
|
||||
|
||||
c := Chunk(NewHistogramChunk())
|
||||
|
||||
// Create fresh appender and add the first histogram.
|
||||
app, err := c.Appender()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
_, _, _, err = app.AppendHistogram(nil, 1, h, true)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
hApp := app.(*HistogramAppender)
|
||||
|
||||
isAppendable := true
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, _, _, _, ok, _ := hApp.appendable(h)
|
||||
isAppendable = isAppendable && ok
|
||||
}
|
||||
if !isAppendable {
|
||||
b.Fail()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -63,7 +63,10 @@ func TestMain(m *testing.M) {
|
|||
flag.Parse()
|
||||
defaultIsolationDisabled = !isolationEnabled
|
||||
|
||||
goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
|
||||
goleak.VerifyTestMain(m,
|
||||
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"),
|
||||
goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"),
|
||||
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
|
||||
}
|
||||
|
||||
func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
|
||||
|
|
40
tsdb/head.go
40
tsdb/head.go
|
@ -178,6 +178,7 @@ type HeadOptions struct {
|
|||
WALReplayConcurrency int
|
||||
|
||||
// EnableSharding enables ShardedPostings() support in the Head.
|
||||
// EnableSharding is temporarily disabled during Init().
|
||||
EnableSharding bool
|
||||
}
|
||||
|
||||
|
@ -609,7 +610,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
|
|||
// Init loads data from the write ahead log and prepares the head for writes.
|
||||
// It should be called before using an appender so that it
|
||||
// limits the ingested samples to the head min valid time.
|
||||
func (h *Head) Init(minValidTime int64) error {
|
||||
func (h *Head) Init(minValidTime int64) (err error) {
|
||||
h.minValidTime.Store(minValidTime)
|
||||
defer func() {
|
||||
h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
|
||||
|
@ -623,6 +624,24 @@ func (h *Head) Init(minValidTime int64) error {
|
|||
}
|
||||
}()
|
||||
|
||||
// If sharding is enabled, disable it while initializing, and calculate the shards later.
|
||||
// We're going to use the shard-hash field for other purposes during WAL replay,
|
||||
// so we don't want to waste time calculating a shard hash that we'd lose anyway.
|
||||
if h.opts.EnableSharding {
|
||||
h.opts.EnableSharding = false
|
||||
defer func() {
|
||||
h.opts.EnableSharding = true
|
||||
if err == nil {
|
||||
// No locking is needed here as nobody should be writing while we're in Init.
|
||||
for _, stripe := range h.series.series {
|
||||
for _, s := range stripe {
|
||||
s.shardHashOrMemoryMappedMaxTime = labels.StableHash(s.lset)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any")
|
||||
start := time.Now()
|
||||
|
||||
|
@ -683,7 +702,6 @@ func (h *Head) Init(minValidTime int64) error {
|
|||
mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
|
||||
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
|
||||
lastMmapRef chunks.ChunkDiskMapperRef
|
||||
err error
|
||||
|
||||
mmapChunkReplayDuration time.Duration
|
||||
)
|
||||
|
@ -2068,9 +2086,11 @@ type memSeries struct {
|
|||
ref chunks.HeadSeriesRef
|
||||
meta *metadata.Metadata
|
||||
|
||||
// Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
|
||||
// been explicitly enabled in TSDB.
|
||||
shardHash uint64
|
||||
// Series labels hash to use for sharding purposes.
|
||||
// The value is always 0 when sharding has not been explicitly enabled in TSDB.
|
||||
// During WAL replay the value stored here is the max time of any mmapped chunk,
|
||||
// and the shard hash is re-calculated after WAL replay is complete.
|
||||
shardHashOrMemoryMappedMaxTime uint64
|
||||
|
||||
// Everything after here should only be accessed with the lock held.
|
||||
sync.Mutex
|
||||
|
@ -2095,8 +2115,6 @@ type memSeries struct {
|
|||
|
||||
ooo *memSeriesOOOFields
|
||||
|
||||
mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.
|
||||
|
||||
nextAt int64 // Timestamp at which to cut the next chunk.
|
||||
histogramChunkHasComputedEndTime bool // True if nextAt has been predicted for the current histograms chunk; false otherwise.
|
||||
pendingCommit bool // Whether there are samples waiting to be committed to this series.
|
||||
|
@ -2130,7 +2148,7 @@ func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64,
|
|||
lset: lset,
|
||||
ref: id,
|
||||
nextAt: math.MinInt64,
|
||||
shardHash: shardHash,
|
||||
shardHashOrMemoryMappedMaxTime: shardHash,
|
||||
}
|
||||
if !isolationDisabled {
|
||||
s.txs = newTxRing(0)
|
||||
|
@ -2218,6 +2236,12 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
|
|||
return removedInOrder + removedOOO
|
||||
}
|
||||
|
||||
// shardHash returns the shard hash of the series, only available after WAL replay.
|
||||
func (s *memSeries) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime }
|
||||
|
||||
// mmMaxTime returns the max time of any mmapped chunk in the series, only available during WAL replay.
|
||||
func (s *memSeries) mmMaxTime() int64 { return int64(s.shardHashOrMemoryMappedMaxTime) }
|
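The field overlay is safe because an int64 max time survives a round trip through uint64. A minimal sketch of the conversion that the two accessors above rely on:

package main

import (
	"fmt"
	"math"
)

func main() {
	// During WAL replay the field holds a max time (int64, possibly negative);
	// after replay it holds a shard hash (uint64). The int64 -> uint64 -> int64
	// round trip is lossless, which is what makes sharing the field safe.
	for _, t := range []int64{math.MinInt64, -1, 0, 12345, math.MaxInt64} {
		packed := uint64(t)             // as stored in shardHashOrMemoryMappedMaxTime
		fmt.Println(int64(packed) == t) // always prints true
	}
}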
||||
|
||||
// cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after
|
||||
// acquiring lock.
|
||||
func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
|
||||
|
|
|
@ -170,7 +170,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou
|
|||
}
|
||||
|
||||
// Check if the series belongs to the shard.
|
||||
if s.shardHash%shardCount != shardIndex {
|
||||
if s.shardHash()%shardCount != shardIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -89,6 +90,43 @@ func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts
|
|||
return h, wal
|
||||
}
|
||||
|
||||
// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
|
||||
// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
|
||||
// Optionally, BENCHMARK_LOAD_REAL_WLS_PROFILE can be set to a file path to write a CPU profile.
|
||||
func BenchmarkLoadRealWLs(b *testing.B) {
|
||||
dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
|
||||
if dir == "" {
|
||||
b.Skip("BENCHMARK_LOAD_REAL_WLS_DIR is not set")
|
||||
}
|
||||
|
||||
profileFile := os.Getenv("BENCHMARK_LOAD_REAL_WLS_PROFILE")
|
||||
if profileFile != "" {
|
||||
b.Logf("Will profile in %s", profileFile)
|
||||
f, err := os.Create(profileFile)
|
||||
require.NoError(b, err)
|
||||
b.Cleanup(func() { f.Close() })
|
||||
require.NoError(b, pprof.StartCPUProfile(f))
|
||||
b.Cleanup(pprof.StopCPUProfile)
|
||||
}
|
||||
|
||||
wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
|
||||
require.NoError(b, err)
|
||||
b.Cleanup(func() { wal.Close() })
|
||||
|
||||
wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
|
||||
require.NoError(b, err)
|
||||
b.Cleanup(func() { wbl.Close() })
|
||||
|
||||
// Load the WAL.
|
||||
for i := 0; i < b.N; i++ {
|
||||
opts := DefaultHeadOptions()
|
||||
opts.ChunkDirRoot = dir
|
||||
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, h.Init(0))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCreateSeries(b *testing.B) {
|
||||
series := genSeries(b.N, 10, 0, 0)
|
||||
h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
|
||||
|
|
|
@ -435,6 +435,8 @@ Outer:
|
|||
return nil
|
||||
}
|
||||
|
||||
func minInt64() int64 { return math.MinInt64 }
|
||||
|
||||
// resetSeriesWithMMappedChunks is only used during the WAL replay.
|
||||
func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
|
||||
if mSeries.ref != walSeriesRef {
|
||||
|
@ -481,10 +483,11 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
|
|||
}
|
||||
// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
|
||||
if len(mmc) == 0 {
|
||||
mSeries.mmMaxTime = math.MinInt64
|
||||
mSeries.shardHashOrMemoryMappedMaxTime = uint64(minInt64())
|
||||
} else {
|
||||
mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
|
||||
h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
|
||||
mmMaxTime := mmc[len(mmc)-1].maxTime
|
||||
mSeries.shardHashOrMemoryMappedMaxTime = uint64(mmMaxTime)
|
||||
h.updateMinMaxTime(mmc[0].minTime, mmMaxTime)
|
||||
}
|
||||
if len(oooMmc) != 0 {
|
||||
// Mint and maxt can be in any chunk, they are not sorted.
|
||||
|
@ -585,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
|
|||
unknownRefs++
|
||||
continue
|
||||
}
|
||||
if s.T <= ms.mmMaxTime {
|
||||
if s.T <= ms.mmMaxTime() {
|
||||
continue
|
||||
}
|
||||
if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
|
||||
|
@ -614,7 +617,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
|
|||
unknownHistogramRefs++
|
||||
continue
|
||||
}
|
||||
if s.t <= ms.mmMaxTime {
|
||||
if s.t <= ms.mmMaxTime() {
|
||||
continue
|
||||
}
|
||||
var chunkCreated bool
|
||||
|
|
|
@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
|
|||
return r
|
||||
}
|
||||
|
||||
func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram {
|
||||
hs := GenerateTestHistograms(n)
|
||||
for i := range hs {
|
||||
hs[i].CounterResetHint = histogram.UnknownCounterReset
|
||||
}
|
||||
return hs
|
||||
func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
|
||||
h := GenerateTestHistogram(n)
|
||||
h.CounterResetHint = hint
|
||||
return h
|
||||
}
|
||||
|
||||
// GenerateTestHistogram but it is up to the user to set any known counter reset hint.
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -265,9 +264,9 @@ func (w *Watcher) loop() {
|
|||
// Run the watcher, which will tail the WAL until the quit channel is closed
|
||||
// or an error case is hit.
|
||||
func (w *Watcher) Run() error {
|
||||
_, lastSegment, err := w.firstAndLast()
|
||||
_, lastSegment, err := Segments(w.walDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("wal.Segments: %w", err)
|
||||
return fmt.Errorf("Segments: %w", err)
|
||||
}
|
||||
|
||||
// We want to ensure this is false across iterations since
|
||||
|
@ -318,57 +317,20 @@ func (w *Watcher) Run() error {
|
|||
|
||||
// findSegmentForIndex finds the first segment greater than or equal to index.
|
||||
func (w *Watcher) findSegmentForIndex(index int) (int, error) {
|
||||
refs, err := w.segments(w.walDir)
|
||||
refs, err := listSegments(w.walDir)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
for _, r := range refs {
|
||||
if r >= index {
|
||||
return r, nil
|
||||
if r.index >= index {
|
||||
return r.index, nil
|
||||
}
|
||||
}
|
||||
|
||||
return -1, errors.New("failed to find segment for index")
|
||||
}
|
||||
|
||||
func (w *Watcher) firstAndLast() (int, int, error) {
|
||||
refs, err := w.segments(w.walDir)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
|
||||
if len(refs) == 0 {
|
||||
return -1, -1, nil
|
||||
}
|
||||
return refs[0], refs[len(refs)-1], nil
|
||||
}
|
||||
|
||||
// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL.
|
||||
// Plan is to move WAL watcher to TSDB and dedupe these implementations.
|
||||
func (w *Watcher) segments(dir string) ([]int, error) {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var refs []int
|
||||
for _, f := range files {
|
||||
k, err := strconv.Atoi(f.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
refs = append(refs, k)
|
||||
}
|
||||
slices.Sort(refs)
|
||||
for i := 0; i < len(refs)-1; i++ {
|
||||
if refs[i]+1 != refs[i+1] {
|
||||
return nil, errors.New("segments are not sequential")
|
||||
}
|
||||
}
|
||||
return refs, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
|
||||
err := w.readSegment(r, segmentNum, tail)
|
||||
|
||||
|
@ -447,35 +409,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
|
|||
// Currently doing a garbage collect, try again later.
|
||||
}
|
||||
|
||||
// if a newer segment is produced, read the current one until the end and move on.
|
||||
case <-segmentTicker.C:
|
||||
_, last, err := w.firstAndLast()
|
||||
_, last, err := Segments(w.walDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("segments: %w", err)
|
||||
return fmt.Errorf("Segments: %w", err)
|
||||
}
|
||||
|
||||
// Check if new segments exist.
|
||||
if last <= segmentNum {
|
||||
if last > segmentNum {
|
||||
return w.readAndHandleError(reader, segmentNum, tail, size)
|
||||
}
|
||||
continue
|
||||
}
|
||||
err = w.readSegment(reader, segmentNum, tail)
|
||||
|
||||
// Ignore errors reading to end of segment whilst replaying the WAL.
|
||||
if !tail {
|
||||
switch {
|
||||
case err != nil && !errors.Is(err, io.EOF):
|
||||
level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
|
||||
case reader.Offset() != size:
|
||||
level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, when we are tailing, non-EOFs are fatal.
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
// we haven't read due to a notification in quite some time, try reading anyways
|
||||
case <-readTicker.C:
|
||||
|
@ -484,7 +428,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// still want to reset the ticker so we don't read too often
|
||||
// reset the ticker so we don't read too often
|
||||
readTicker.Reset(readTimeout)
|
||||
|
||||
case <-w.readNotify:
|
||||
|
@ -492,7 +436,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// still want to reset the ticker so we don't read too often
|
||||
// reset the ticker so we don't read too often
|
||||
readTicker.Reset(readTimeout)
|
||||
}
|
||||
}
|
||||
|
@ -731,17 +675,17 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
|
|||
}
|
||||
|
||||
// Ensure we read the whole contents of every segment in the checkpoint dir.
|
||||
segs, err := w.segments(checkpointDir)
|
||||
segs, err := listSegments(checkpointDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to get segments checkpoint dir: %w", err)
|
||||
}
|
||||
for _, seg := range segs {
|
||||
size, err := getSegmentSize(checkpointDir, seg)
|
||||
for _, segRef := range segs {
|
||||
size, err := getSegmentSize(checkpointDir, segRef.index)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getSegmentSize: %w", err)
|
||||
}
|
||||
|
||||
sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
|
||||
sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open segment: %w", err)
|
||||
}
|
||||
|
@ -753,7 +697,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
|
|||
}
|
||||
|
||||
if r.Offset() != size {
|
||||
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
|
||||
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
81
util/junitxml/junitxml.go
Normal file
81
util/junitxml/junitxml.go
Normal file
|
@ -0,0 +1,81 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package junitxml
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
)
|
||||
|
||||
type JUnitXML struct {
|
||||
XMLName xml.Name `xml:"testsuites"`
|
||||
Suites []*TestSuite `xml:"testsuite"`
|
||||
}
|
||||
|
||||
type TestSuite struct {
|
||||
Name string `xml:"name,attr"`
|
||||
TestCount int `xml:"tests,attr"`
|
||||
FailureCount int `xml:"failures,attr"`
|
||||
ErrorCount int `xml:"errors,attr"`
|
||||
SkippedCount int `xml:"skipped,attr"`
|
||||
Timestamp string `xml:"timestamp,attr"`
|
||||
Cases []*TestCase `xml:"testcase"`
|
||||
}
|
||||
type TestCase struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Failures []string `xml:"failure,omitempty"`
|
||||
Error string `xml:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (j *JUnitXML) WriteXML(h io.Writer) error {
|
||||
return xml.NewEncoder(h).Encode(j)
|
||||
}
|
||||
|
||||
func (j *JUnitXML) Suite(name string) *TestSuite {
|
||||
ts := &TestSuite{Name: name}
|
||||
j.Suites = append(j.Suites, ts)
|
||||
return ts
|
||||
}
|
||||
|
||||
func (ts *TestSuite) Fail(f string) {
|
||||
ts.FailureCount++
|
||||
curt := ts.lastCase()
|
||||
curt.Failures = append(curt.Failures, f)
|
||||
}
|
||||
|
||||
func (ts *TestSuite) lastCase() *TestCase {
|
||||
if len(ts.Cases) == 0 {
|
||||
ts.Case("unknown")
|
||||
}
|
||||
return ts.Cases[len(ts.Cases)-1]
|
||||
}
|
||||
|
||||
func (ts *TestSuite) Case(name string) *TestSuite {
|
||||
j := &TestCase{
|
||||
Name: name,
|
||||
}
|
||||
ts.Cases = append(ts.Cases, j)
|
||||
ts.TestCount++
|
||||
return ts
|
||||
}
|
||||
|
||||
func (ts *TestSuite) Settime(timestamp string) {
|
||||
ts.Timestamp = timestamp
|
||||
}
|
||||
|
||||
func (ts *TestSuite) Abort(e error) {
|
||||
ts.ErrorCount++
|
||||
curt := ts.lastCase()
|
||||
curt.Error = e.Error()
|
||||
}
|
66
util/junitxml/junitxml_test.go
Normal file
66
util/junitxml/junitxml_test.go
Normal file
|
@ -0,0 +1,66 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package junitxml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestJunitOutput(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
var test JUnitXML
|
||||
x := FakeTestSuites()
|
||||
if err := x.WriteXML(&buf); err != nil {
|
||||
t.Fatalf("Failed to encode XML: %v", err)
|
||||
}
|
||||
|
||||
output := buf.Bytes()
|
||||
|
||||
err := xml.Unmarshal(output, &test)
|
||||
if err != nil {
|
||||
t.Errorf("Unmarshal failed with error: %v", err)
|
||||
}
|
||||
var total int
|
||||
var cases int
|
||||
total = len(test.Suites)
|
||||
if total != 3 {
|
||||
t.Errorf("JUnit output had %d testsuite elements; expected 3\n", total)
|
||||
}
|
||||
for _, i := range test.Suites {
|
||||
cases += len(i.Cases)
|
||||
}
|
||||
|
||||
if cases != 7 {
|
||||
t.Errorf("JUnit output had %d testcase; expected 7\n", cases)
|
||||
}
|
||||
}
|
||||
|
||||
func FakeTestSuites() *JUnitXML {
|
||||
ju := &JUnitXML{}
|
||||
good := ju.Suite("all good")
|
||||
good.Case("alpha")
|
||||
good.Case("beta")
|
||||
good.Case("gamma")
|
||||
mixed := ju.Suite("mixed")
|
||||
mixed.Case("good")
|
||||
bad := mixed.Case("bad")
|
||||
bad.Fail("once")
|
||||
bad.Fail("twice")
|
||||
mixed.Case("ugly").Abort(errors.New("buggy"))
|
||||
ju.Suite("fast").Fail("fail early")
|
||||
return ju
|
||||
}
|
|
@ -295,7 +295,7 @@ func NewAPI(
|
|||
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
|
||||
}
|
||||
if otlpEnabled {
|
||||
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
|
||||
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc)
|
||||
}
|
||||
|
||||
return a
|
||||
|
|
|
@ -359,6 +359,7 @@ var samplePrometheusCfg = config.Config{
|
|||
ScrapeConfigs: []*config.ScrapeConfig{},
|
||||
RemoteWriteConfigs: []*config.RemoteWriteConfig{},
|
||||
RemoteReadConfigs: []*config.RemoteReadConfig{},
|
||||
OTLPConfig: config.OTLPConfig{},
|
||||
}
|
||||
|
||||
var sampleFlagMap = map[string]string{
|
||||
|
|
|
@ -37,10 +37,10 @@
|
|||
"@codemirror/language": "^6.10.2",
|
||||
"@codemirror/lint": "^6.8.1",
|
||||
"@codemirror/state": "^6.3.3",
|
||||
"@codemirror/view": "^6.28.3",
|
||||
"@codemirror/view": "^6.29.1",
|
||||
"@lezer/common": "^1.2.1",
|
||||
"@lezer/highlight": "^1.2.0",
|
||||
"@lezer/lr": "^1.4.1",
|
||||
"@lezer/lr": "^1.4.2",
|
||||
"isomorphic-fetch": "^3.0.0",
|
||||
"nock": "^13.5.4"
|
||||
},
|
||||
|
|
|
@ -775,7 +775,7 @@ describe('computeStartCompletePosition test', () => {
|
|||
it(value.title, () => {
|
||||
const state = createEditorState(value.expr);
|
||||
const node = syntaxTree(state).resolve(value.pos, -1);
|
||||
const result = computeStartCompletePosition(node, value.pos);
|
||||
const result = computeStartCompletePosition(state, node, value.pos);
|
||||
expect(value.expectedStart).toEqual(result);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -21,7 +21,6 @@ import {
|
|||
BinaryExpr,
|
||||
BoolModifier,
|
||||
Div,
|
||||
Duration,
|
||||
Eql,
|
||||
EqlRegex,
|
||||
EqlSingle,
|
||||
|
@ -40,7 +39,6 @@ import {
|
|||
Mul,
|
||||
Neq,
|
||||
NeqRegex,
|
||||
NumberLiteral,
|
||||
OffsetExpr,
|
||||
Or,
|
||||
Pow,
|
||||
|
@ -54,6 +52,8 @@ import {
|
|||
UnquotedLabelMatcher,
|
||||
QuotedLabelMatcher,
|
||||
QuotedLabelName,
|
||||
NumberDurationLiteralInDurationContext,
|
||||
NumberDurationLiteral,
|
||||
} from '@prometheus-io/lezer-promql';
|
||||
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
|
||||
import { EditorState } from '@codemirror/state';
|
||||
|
@ -179,7 +179,8 @@ function computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node:
|
|||
// It is an important step because the start position will be used by CodeMirror to find the string and then use it to filter the CompletionResult.
|
||||
// A wrong `start` position will prevent the completion from working.
|
||||
// Note: this method is exported only for testing purpose.
|
||||
export function computeStartCompletePosition(node: SyntaxNode, pos: number): number {
|
||||
export function computeStartCompletePosition(state: EditorState, node: SyntaxNode, pos: number): number {
|
||||
const currentText = state.doc.slice(node.from, pos).toString();
|
||||
let start = node.from;
|
||||
if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
|
||||
start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
|
||||
|
@ -191,11 +192,16 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num
|
|||
start++;
|
||||
} else if (
|
||||
node.type.id === OffsetExpr ||
|
||||
(node.type.id === NumberLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) ||
|
||||
// Since durations and numbers are now equivalent, writing go[5] or go[5d] is syntactically valid.
|
||||
// Previously we were able to guess that we had to autocomplete the duration based on the error node,
|
||||
// which is not possible anymore.
|
||||
// So we have to analyze the string around the current node to see whether the duration unit is already present.
|
||||
(node.type.id === NumberDurationLiteralInDurationContext && !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1])) ||
|
||||
(node.type.id === NumberDurationLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) ||
|
||||
(node.type.id === 0 &&
|
||||
(node.parent?.type.id === OffsetExpr ||
|
||||
node.parent?.type.id === MatrixSelector ||
|
||||
(node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, Duration))))
|
||||
(node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, NumberDurationLiteralInDurationContext))))
|
||||
) {
|
||||
start = pos;
|
||||
}
|
||||
|
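Since go[5] and go[5d] are now both syntactically valid, the parser no longer produces an error node where a duration unit is expected, so the code must inspect the text already typed. A hedged sketch of that check; durationTerms is assumed to be the file's existing unit-completion list with labels y, w, d, h, m, s, ms:

import { EditorState } from '@codemirror/state';
import { SyntaxNode } from '@lezer/common';

// Assumed shape of the pre-existing durationTerms list referenced in the diff.
const durationTerms = ['y', 'w', 'd', 'h', 'm', 's', 'ms'].map((label) => ({ label }));

function durationUnitMissing(state: EditorState, node: SyntaxNode, pos: number): boolean {
  // Text between the start of the literal and the cursor, e.g. "5" or "5d".
  const currentText = state.doc.slice(node.from, pos).toString();
  // "foo[5" -> true (re-anchor at the cursor and offer units); "foo[5d" -> false.
  return !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1]);
}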
@@ -230,7 +236,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
       result.push({ kind: ContextKind.Duration });
       break;
     }
-    if (node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, Duration)) {
+    if (node.parent?.type.id === SubqueryExpr && containsAtLeastOneChild(node.parent, NumberDurationLiteralInDurationContext)) {
       // we are likely in the given situation:
       // `rate(foo[5d:5])`
       // so we should autocomplete a duration
@@ -434,7 +440,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
         result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) });
       }
       break;
-    case NumberLiteral:
+    case NumberDurationLiteral:
       if (node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) {
         // Here we are likely in this situation:
         // `go[5d:4]`
@@ -449,7 +455,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
         result.push({ kind: ContextKind.Number });
       }
       break;
-    case Duration:
+    case NumberDurationLiteralInDurationContext:
     case OffsetExpr:
       result.push({ kind: ContextKind.Duration });
       break;
@@ -591,7 +597,7 @@ export class HybridComplete implements CompleteStrategy {
       }
     }
     return asyncResult.then((result) => {
-      return arrayToCompletionResult(result, computeStartCompletePosition(tree, pos), pos, completeSnippet, span);
+      return arrayToCompletionResult(result, computeStartCompletePosition(state, tree, pos), pos, completeSnippet, span);
     });
   }
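For context, HybridComplete is the strategy that backs this package's autocompletion. A usage sketch of how it is typically wired up through the public PromQLExtension API (wiring assumed from the package's README conventions, not from this diff):

import { EditorState } from '@codemirror/state';
import { EditorView } from '@codemirror/view';
import { PromQLExtension } from '@prometheus-io/codemirror-promql';

// The extension bundles the lezer-promql parser with the hybrid completion above.
const promql = new PromQLExtension();
new EditorView({
  state: EditorState.create({ doc: 'rate(foo[5', extensions: [promql.asExtension()] }),
  parent: document.body,
});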
@@ -17,7 +17,7 @@ import {
   BinaryExpr,
   FunctionCall,
   MatrixSelector,
-  NumberLiteral,
+  NumberDurationLiteral,
   OffsetExpr,
   ParenExpr,
   StepInvariantExpr,
@@ -42,7 +42,7 @@ export function getType(node: SyntaxNode | null): ValueType {
       return getType(node.firstChild);
     case StringLiteral:
       return ValueType.string;
-    case NumberLiteral:
+    case NumberDurationLiteral:
       return ValueType.scalar;
     case MatrixSelector:
       return ValueType.matrix;
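A quick illustration of the renamed case: numbers and durations now share one node type, and both should type as scalars. The parser export comes from the sibling lezer-promql package; the getType import path is assumed for illustration:

import { parser } from '@prometheus-io/lezer-promql';
import { getType, ValueType } from './parser'; // path assumed for illustration

// `1 + 2` parses to BinaryExpr(NumberDurationLiteral, Add, NumberDurationLiteral),
// so getType should report ValueType.scalar for the whole expression.
const expr = parser.parse('1 + 2').topNode.firstChild;
console.log(getType(expr) === ValueType.scalar);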
@@ -30,9 +30,9 @@
     "test": "NODE_OPTIONS=--experimental-vm-modules jest"
   },
   "devDependencies": {
-    "@lezer/generator": "^1.7.0",
+    "@lezer/generator": "^1.7.1",
     "@lezer/highlight": "^1.2.0",
-    "@lezer/lr": "^1.4.1"
+    "@lezer/lr": "^1.4.2"
   },
   "peerDependencies": {
     "@lezer/highlight": "^1.1.2",
@@ -17,8 +17,8 @@ export const promQLHighLight = styleTags({
   LineComment: tags.comment,
   LabelName: tags.labelName,
   StringLiteral: tags.string,
-  NumberLiteral: tags.number,
-  Duration: tags.number,
+  NumberDurationLiteral: tags.number,
+  NumberDurationLiteralInDurationContext: tags.number,
   Identifier: tags.variableName,
   'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year':
     tags.function(tags.variableName),
@@ -29,7 +29,7 @@ expr[@isGroup=Expr] {
   BinaryExpr |
   FunctionCall |
   MatrixSelector |
-  NumberLiteral |
+  NumberDurationLiteral |
   OffsetExpr |
   ParenExpr |
   StringLiteral |
@@ -194,16 +194,16 @@ ParenExpr {
 }

 OffsetExpr {
-  expr Offset Sub? Duration
+  expr Offset NumberDurationLiteralInDurationContext
 }

 MatrixSelector {
   // TODO: Can this not be more specific than "expr"?
-  expr "[" Duration "]"
+  expr "[" NumberDurationLiteralInDurationContext "]"
 }

 SubqueryExpr {
-  expr "[" Duration ":" ("" | Duration) "]"
+  expr "[" NumberDurationLiteralInDurationContext ":" ("" | NumberDurationLiteralInDurationContext) "]"
 }

 UnaryExpr {
@@ -245,14 +245,18 @@ QuotedLabelName {
 }

 StepInvariantExpr {
-  expr At ( NumberLiteral | AtModifierPreprocessors "(" ")" )
+  expr At ( NumberDurationLiteral | AtModifierPreprocessors "(" ")" )
 }

 AtModifierPreprocessors {
   Start | End
 }

-NumberLiteral {
+NumberDurationLiteral {
   ("-"|"+")?~signed (number | inf | nan)
 }
+
+NumberDurationLiteralInDurationContext {
+  ("-"|"+")?~signed (number | inf | nan)
+}
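Taken together, these grammar hunks fold the old Duration and NumberLiteral nodes into one literal with two context-dependent names. A small sketch of the node names that should now appear in parse trees (the parser export is the generated package's convention; the expected tree shapes are taken from the test expectations below):

import { parser } from '@prometheus-io/lezer-promql';

for (const expr of ['foo offset -5d', 'foo[5m]', '1 + 2']) {
  // e.g. foo offset -5d ==>
  //   PromQL(OffsetExpr(VectorSelector(Identifier), Offset, NumberDurationLiteralInDurationContext))
  console.log(`${expr} ==> ${parser.parse(expr).toString()}`);
}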
@@ -264,7 +268,7 @@ NumberLiteral {

 number {
   (std.digit+ (("_")? std.digit)* ("." std.digit+ (("_")? std.digit)*)? | "." std.digit+ (("_")? std.digit)*) (("e" | "E") ("+" | "-")? std.digit+ (("_")? std.digit)*)? |
-  "0x" (std.digit | $[a-fA-F])+
+  "0x" (std.digit | $[a-fA-F])+ | duration
 }
 StringLiteral { // TODO: This is for JS, make this work for PromQL.
   '"' (![\\\n"] | "\\" _)* '"'? |
@@ -272,7 +276,7 @@ NumberLiteral {
   "`" ![`]* "`"
 }

-Duration {
+duration {
   // Each line below is just the same regex repeated over and over, but each time with one of the units made non-optional,
   // to ensure that at least one <number>+<unit> pair is provided and an empty string is not recognized as a valid duration.
   ( ( std.digit+ "y" ) ( std.digit+ "w" )? ( std.digit+ "d" )? ( std.digit+ "h" )? ( std.digit+ "m" )? ( std.digit+ "s" )? ( std.digit+ "ms" )? ) |
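The renamed duration token keeps the old trick: the same ordered unit sequence is repeated once per unit, each time with that unit made mandatory, which enforces both "at least one number+unit pair" and strict unit order. The same idea expressed as a plain regular expression (a sketch of the principle, not the generated tokenizer):

// Each alternative is the full ordered sequence with one unit required.
const units = ['y', 'w', 'd', 'h', 'm', 's', 'ms'];
const alternative = (required: number): string =>
  units.map((u, i) => `(\\d+${u})${i === required ? '' : '?'}`).join('');
const duration = new RegExp(`^(?:${units.map((_, i) => alternative(i)).join('|')})$`);

console.log(duration.test('1y2w3d4h5m6s7ms')); // true
console.log(duration.test(''));                // false: no pair at all
console.log(duration.test('1m2h'));            // false: wrong unit order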
@@ -4,7 +4,7 @@

 ==>

-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Double-quoted string literal
@@ -46,7 +46,7 @@ PromQL(StringLiteral)

 ==>

-PromQL(BinaryExpr(NumberLiteral, Add, NumberLiteral))
+PromQL(BinaryExpr(NumberDurationLiteral, Add, NumberDurationLiteral))

 # Complex expression
@@ -73,7 +73,7 @@ PromQL(
       VectorSelector(
         Identifier
       ),
-      Duration
+      NumberDurationLiteralInDurationContext
     )
   )
 )
@@ -103,7 +103,7 @@ PromQL(
       VectorSelector(
         Identifier
       ),
-      Duration
+      NumberDurationLiteralInDurationContext
     )
   )
 )
@@ -240,21 +240,21 @@ PromQL(
   )
 )

-# Duration units
+# NumberDurationLiteralInDurationContext units

 foo[1y2w3d4h5m6s7ms]

 ==>

-PromQL(MatrixSelector(VectorSelector(Identifier),Duration))
+PromQL(MatrixSelector(VectorSelector(Identifier),NumberDurationLiteralInDurationContext))

-# Incorrectly ordered duration units
+# Incorrectly ordered NumberDurationLiteralInDurationContext units

 foo[1m2h]

 ==>

-PromQL(SubqueryExpr(VectorSelector(Identifier),Duration,⚠,Duration))
+PromQL(MatrixSelector(VectorSelector(Identifier),NumberDurationLiteralInDurationContext,⚠))

 # Using a function name as a metric name
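The flipped expectation above is a genuine behavior change, not just a rename: a misordered duration such as foo[1m2h] used to error-recover into a SubqueryExpr and now recovers as a MatrixSelector containing an error node (printed as ⚠). A sketch for checking that kind of expectation outside the repo's test runner:

import { parser } from '@prometheus-io/lezer-promql';

// Tree.toString() prints the same shape the test expectations use.
const printed = parser.parse('foo[1m2h]').toString();
console.log(printed); // expected to contain MatrixSelector(...,⚠) after this change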
@@ -311,7 +311,7 @@ PromQL(
     ),
     Gtr,
     BoolModifier(Bool),
-    NumberLiteral
+    NumberDurationLiteral
   )
 )
@@ -357,7 +357,7 @@ PromQL(
       VectorSelector(
         Identifier
       ),
-      Duration
+      NumberDurationLiteralInDurationContext
     )
   )
 )
@@ -389,8 +389,8 @@ PromQL(
     FunctionIdentifier(Clamp),
     FunctionCallBody(
       VectorSelector(Identifier),
-      NumberLiteral,
-      NumberLiteral
+      NumberDurationLiteral,
+      NumberDurationLiteral
     )
   )
 )
@@ -450,7 +450,7 @@ PromQL(
       Identifier
     ),
     At,
-    NumberLiteral
+    NumberDurationLiteral
   )
 )
@@ -483,7 +483,7 @@ PromQL(
     FunctionCallBody(
       MatrixSelector(
         VectorSelector(Identifier),
-        Duration
+        NumberDurationLiteralInDurationContext
       )
     )
   ),
@@ -491,14 +491,14 @@ PromQL(
   AggregateExpr(
     AggregateOp(Topk),
     FunctionCallBody(
-      NumberLiteral,
+      NumberDurationLiteral,
       FunctionCall(
         FunctionIdentifier(Rate),
         FunctionCallBody(
           StepInvariantExpr(
-            MatrixSelector(VectorSelector(Identifier), Duration),
+            MatrixSelector(VectorSelector(Identifier), NumberDurationLiteralInDurationContext),
             At,
-            NumberLiteral
+            NumberDurationLiteral
           )
         )
       )
@@ -518,7 +518,7 @@ PromQL(
       Identifier
     ),
     At,
-    NumberLiteral
+    NumberDurationLiteral
   )
 )
@@ -533,7 +533,7 @@ PromQL(
       Identifier
     ),
     At,
-    NumberLiteral
+    NumberDurationLiteral
   )
 )
@@ -556,98 +556,98 @@ PromQL(VectorSelector(Identifier))
 NaN

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Lower-cased NaN.

 nan

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Inf.

 Inf

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Negative Inf.

 -Inf

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Positive Inf.

 +Inf

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Lower-cased Inf.

 inf

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Upper-cased Inf.

 INF

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Negative number literal.

 -42

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Explicitly positive number literal.

 +42

 ==>
-PromQL(NumberLiteral)
+PromQL(NumberDurationLiteral)

 # Trying to illegally use NaN as a metric name.

 NaN{foo="bar"}

 ==>
-PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
+PromQL(BinaryExpr(NumberDurationLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))

 # Trying to illegally use Inf as a metric name.

 Inf{foo="bar"}

 ==>
-PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
+PromQL(BinaryExpr(NumberDurationLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))

 # Negative offset

 foo offset -5d

 ==>
-PromQL(OffsetExpr(VectorSelector(Identifier), Offset, Sub, Duration))
+PromQL(OffsetExpr(VectorSelector(Identifier), Offset, NumberDurationLiteralInDurationContext))

 # Negative offset with space

 foo offset - 5d

 ==>
-PromQL(OffsetExpr(VectorSelector(Identifier), Offset, Sub, Duration))
+PromQL(OffsetExpr(VectorSelector(Identifier), Offset, NumberDurationLiteralInDurationContext))

 # Positive offset

 foo offset 5d

 ==>
-PromQL(OffsetExpr(VectorSelector(Identifier), Offset, Duration))
+PromQL(OffsetExpr(VectorSelector(Identifier), Offset, NumberDurationLiteralInDurationContext))

 # Parsing only metric names with alternative @top { "top": "MetricName" }
@@ -661,7 +661,7 @@ MetricName(Identifier)
 1 + foo atan2 bar

 ==>
-PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))
+PromQL(BinaryExpr(NumberDurationLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))

 # Testing quoted metric name
126 web/ui/package-lock.json (generated)
@@ -41,10 +41,10 @@
         "@codemirror/language": "^6.10.2",
         "@codemirror/lint": "^6.8.1",
         "@codemirror/state": "^6.3.3",
-        "@codemirror/view": "^6.28.3",
+        "@codemirror/view": "^6.29.1",
         "@lezer/common": "^1.2.1",
         "@lezer/highlight": "^1.2.0",
-        "@lezer/lr": "^1.4.1",
+        "@lezer/lr": "^1.4.2",
         "isomorphic-fetch": "^3.0.0",
         "nock": "^13.5.4"
       },
@@ -72,9 +72,9 @@
       "version": "0.54.0-rc.1",
       "license": "Apache-2.0",
       "devDependencies": {
-        "@lezer/generator": "^1.7.0",
+        "@lezer/generator": "^1.7.1",
         "@lezer/highlight": "^1.2.0",
-        "@lezer/lr": "^1.4.1"
+        "@lezer/lr": "^1.4.2"
       },
       "peerDependencies": {
         "@lezer/highlight": "^1.1.2",
@@ -2093,9 +2093,9 @@
       "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
     },
     "node_modules/@codemirror/view": {
-      "version": "6.28.3",
-      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz",
-      "integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==",
+      "version": "6.29.1",
+      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.29.1.tgz",
+      "integrity": "sha512-7r+DlO/QFwPqKp73uq5mmrS4TuLPUVotbNOKYzN3OLP5ScrOVXcm4g13/48b6ZXGhdmzMinzFYqH0vo+qihIkQ==",
       "dependencies": {
         "@codemirror/state": "^6.4.0",
         "style-mod": "^4.1.0",
@@ -3371,9 +3371,9 @@
       "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ=="
     },
     "node_modules/@lezer/generator": {
-      "version": "1.7.0",
-      "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.0.tgz",
-      "integrity": "sha512-IJ16tx3biLKlCXUzcK4v8S10AVa2BSM2rB12rtAL6f1hL2TS/HQQlGCoWRvanlL2J4mCYEEIv9uG7n4kVMkVDA==",
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.1.tgz",
+      "integrity": "sha512-MgPJN9Si+ccxzXl3OAmCeZuUKw4XiPl4y664FX/hnnyG9CTqUPq65N3/VGPA2jD23D7QgMTtNqflta+cPN+5mQ==",
       "dev": true,
       "dependencies": {
         "@lezer/common": "^1.1.0",
@@ -3391,9 +3391,9 @@
       }
     },
     "node_modules/@lezer/lr": {
-      "version": "1.4.1",
-      "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz",
-      "integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==",
+      "version": "1.4.2",
+      "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz",
+      "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==",
       "dependencies": {
         "@lezer/common": "^1.0.0"
       }
@@ -5541,20 +5541,21 @@
       "license": "MIT"
     },
     "node_modules/body-parser": {
-      "version": "1.20.0",
+      "version": "1.20.2",
+      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz",
+      "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "bytes": "3.1.2",
-        "content-type": "~1.0.4",
+        "content-type": "~1.0.5",
         "debug": "2.6.9",
         "depd": "2.0.0",
         "destroy": "1.2.0",
         "http-errors": "2.0.0",
         "iconv-lite": "0.4.24",
         "on-finished": "2.4.1",
-        "qs": "6.10.3",
-        "raw-body": "2.5.1",
+        "qs": "6.11.0",
+        "raw-body": "2.5.2",
         "type-is": "~1.6.18",
         "unpipe": "1.0.0"
       },
@@ -5565,24 +5566,27 @@
     },
     "node_modules/body-parser/node_modules/bytes": {
       "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+      "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">= 0.8"
       }
     },
     "node_modules/body-parser/node_modules/debug": {
       "version": "2.6.9",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+      "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "ms": "2.0.0"
       }
     },
     "node_modules/body-parser/node_modules/iconv-lite": {
       "version": "0.4.24",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+      "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "safer-buffer": ">= 2.1.2 < 3"
       },
@@ -5592,8 +5596,9 @@
     },
     "node_modules/body-parser/node_modules/ms": {
       "version": "2.0.0",
-      "dev": true,
-      "license": "MIT"
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+      "dev": true
     },
     "node_modules/bonjour-service": {
       "version": "1.0.14",
@@ -5639,10 +5644,11 @@
       }
     },
     "node_modules/braces": {
-      "version": "3.0.2",
-      "license": "MIT",
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+      "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
       "dependencies": {
-        "fill-range": "^7.0.1"
+        "fill-range": "^7.1.1"
       },
       "engines": {
         "node": ">=8"
@@ -6214,9 +6220,10 @@
       "license": "MIT"
     },
     "node_modules/content-type": {
-      "version": "1.0.4",
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+      "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">= 0.6"
       }
@@ -6230,9 +6237,10 @@
       }
     },
     "node_modules/cookie": {
-      "version": "0.5.0",
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
+      "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">= 0.6"
       }
@@ -7112,9 +7120,10 @@
       "license": "MIT"
     },
     "node_modules/ejs": {
-      "version": "3.1.8",
+      "version": "3.1.10",
+      "resolved": "https://registry.npmjs.org/ejs/-/ejs-3.1.10.tgz",
+      "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==",
       "dev": true,
-      "license": "Apache-2.0",
       "dependencies": {
         "jake": "^10.8.5"
       },
@@ -8147,16 +8156,17 @@
       "license": "MIT"
     },
     "node_modules/express": {
-      "version": "4.18.1",
+      "version": "4.19.2",
+      "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz",
+      "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "accepts": "~1.3.8",
         "array-flatten": "1.1.1",
-        "body-parser": "1.20.0",
+        "body-parser": "1.20.2",
         "content-disposition": "0.5.4",
         "content-type": "~1.0.4",
-        "cookie": "0.5.0",
+        "cookie": "0.6.0",
         "cookie-signature": "1.0.6",
         "debug": "2.6.9",
         "depd": "2.0.0",
@@ -8172,7 +8182,7 @@
         "parseurl": "~1.3.3",
         "path-to-regexp": "0.1.7",
         "proxy-addr": "~2.0.7",
-        "qs": "6.10.3",
+        "qs": "6.11.0",
         "range-parser": "~1.2.1",
         "safe-buffer": "5.2.1",
         "send": "0.18.0",
@@ -8368,8 +8378,9 @@
       }
     },
     "node_modules/fill-range": {
-      "version": "7.0.1",
-      "license": "MIT",
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+      "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
       "dependencies": {
         "to-regex-range": "^5.0.1"
       },
@@ -8456,14 +8467,15 @@
       "license": "ISC"
     },
     "node_modules/follow-redirects": {
-      "version": "1.15.2",
+      "version": "1.15.6",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
+      "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
       "funding": [
         {
           "type": "individual",
           "url": "https://github.com/sponsors/RubenVerborgh"
         }
       ],
-      "license": "MIT",
       "engines": {
         "node": ">=4.0"
       },
@@ -9548,7 +9560,8 @@
     },
     "node_modules/is-number": {
       "version": "7.0.0",
-      "license": "MIT",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+      "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
       "engines": {
         "node": ">=0.12.0"
       }
@@ -12359,8 +12372,9 @@
     },
     "node_modules/media-typer": {
       "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+      "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">= 0.6"
       }
@@ -14784,9 +14798,10 @@
       }
     },
     "node_modules/qs": {
-      "version": "6.10.3",
+      "version": "6.11.0",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+      "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
       "dev": true,
-      "license": "BSD-3-Clause",
       "dependencies": {
         "side-channel": "^1.0.4"
       },
@@ -14874,9 +14889,10 @@
       }
     },
     "node_modules/raw-body": {
-      "version": "2.5.1",
+      "version": "2.5.2",
+      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+      "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "bytes": "3.1.2",
         "http-errors": "2.0.0",
@@ -14889,16 +14905,18 @@
     },
     "node_modules/raw-body/node_modules/bytes": {
       "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+      "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
       "dev": true,
-      "license": "MIT",
       "engines": {
         "node": ">= 0.8"
       }
     },
     "node_modules/raw-body/node_modules/iconv-lite": {
       "version": "0.4.24",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+      "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "safer-buffer": ">= 2.1.2 < 3"
       },
@@ -17969,7 +17987,8 @@
     },
     "node_modules/to-regex-range": {
       "version": "5.0.1",
-      "license": "MIT",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+      "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
       "dependencies": {
         "is-number": "^7.0.0"
       },
@@ -18191,8 +18210,9 @@
     },
     "node_modules/type-is": {
       "version": "1.6.18",
+      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+      "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
       "dev": true,
-      "license": "MIT",
       "dependencies": {
         "media-typer": "0.3.0",
         "mime-types": "~2.1.24"
@@ -19340,14 +19360,14 @@
         "@codemirror/lint": "^6.8.1",
         "@codemirror/search": "^6.5.6",
         "@codemirror/state": "^6.3.3",
-        "@codemirror/view": "^6.28.3",
+        "@codemirror/view": "^6.29.1",
         "@forevolve/bootstrap-dark": "^4.0.2",
         "@fortawesome/fontawesome-svg-core": "6.5.2",
         "@fortawesome/free-solid-svg-icons": "6.5.2",
         "@fortawesome/react-fontawesome": "0.2.0",
         "@lezer/common": "^1.2.1",
         "@lezer/highlight": "^1.2.0",
-        "@lezer/lr": "^1.4.1",
+        "@lezer/lr": "^1.4.2",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
         "@prometheus-io/codemirror-promql": "0.54.0-rc.1",
@@ -9,14 +9,14 @@
     "@codemirror/lint": "^6.8.1",
     "@codemirror/search": "^6.5.6",
     "@codemirror/state": "^6.3.3",
-    "@codemirror/view": "^6.28.3",
+    "@codemirror/view": "^6.29.1",
     "@forevolve/bootstrap-dark": "^4.0.2",
     "@fortawesome/fontawesome-svg-core": "6.5.2",
     "@fortawesome/free-solid-svg-icons": "6.5.2",
     "@fortawesome/react-fontawesome": "0.2.0",
     "@lezer/common": "^1.2.1",
     "@lezer/highlight": "^1.2.0",
-    "@lezer/lr": "^1.4.1",
+    "@lezer/lr": "^1.4.2",
    "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
     "@prometheus-io/codemirror-promql": "0.54.0-rc.1",