Merge branch 'main' into nhcb

György Krajcsovits 2024-03-27 18:42:10 +01:00
commit 2a4aa085d2
32 changed files with 3347 additions and 549 deletions

View file

@ -90,7 +90,7 @@ can modify the `./promql/parser/generated_parser.y.go` manually.
```golang
// As of writing this was somewhere around line 600.
var (
yyDebug = 0 // This can be be a number 0 -> 5.
yyDebug = 0 // This can be a number 0 -> 5.
yyErrorVerbose = false // This can be set to true.
)

View file

@ -33,6 +33,7 @@ import (
"github.com/alecthomas/units"
"github.com/go-kit/log"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
@ -149,8 +150,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
}
func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
var mu sync.Mutex
var total uint64
var total atomic.Uint64
for i := 0; i < scrapeCount; i += 100 {
var wg sync.WaitGroup
@ -165,22 +165,21 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
wg.Add(1)
go func() {
defer wg.Done()
n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
if err != nil {
// exitWithError(err)
fmt.Println(" err", err)
}
mu.Lock()
total += n
mu.Unlock()
wg.Done()
total.Add(n)
}()
}
wg.Wait()
}
fmt.Println("ingestion completed")
return total, nil
return total.Load(), nil
}
func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
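The switch from a mutex-guarded counter to `atomic.Uint64` above removes both the lock and the explicit unlock ordering. A minimal standalone sketch of the same pattern, assuming the `go.uber.org/atomic` package already imported in this file:

```golang
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var total atomic.Uint64 // zero value is ready to use; no mutex needed
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n uint64) {
			defer wg.Done()
			total.Add(n) // replaces mu.Lock(); total += n; mu.Unlock()
		}(uint64(i))
	}
	wg.Wait()
	fmt.Println(total.Load()) // 45
}
```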

View file

@ -175,13 +175,18 @@ type testGroup struct {
}
// test performs the unit tests.
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) []error {
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
// Setup testing suite.
suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil {
return []error{err}
}
defer suite.Close()
defer func() {
err := suite.Close()
if err != nil {
outErr = append(outErr, err)
}
}()
suite.SubqueryInterval = evalInterval
// Load the rule files.
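The change above converts the plain `defer suite.Close()` into a deferred closure that writes into the named return value `outErr`, so a failing `Close()` is reported instead of silently dropped. A self-contained sketch of the idiom (`runWithCleanup` is an illustrative name, not part of the diff):

```golang
package main

import (
	"fmt"
	"io"
	"strings"
)

// runWithCleanup appends a deferred Close error to the named return value
// outErr, so the caller sees it instead of it being discarded.
func runWithCleanup(r io.ReadCloser) (outErr []error) {
	defer func() {
		if err := r.Close(); err != nil {
			outErr = append(outErr, err)
		}
	}()
	// ... do work, possibly appending other errors to outErr ...
	return outErr
}

func main() {
	fmt.Println(runWithCleanup(io.NopCloser(strings.NewReader("x"))))
}
```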

View file

@ -213,6 +213,14 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.Discoverer
return d, nil
}
type client interface {
getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error)
getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error)
getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error)
getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error)
getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error)
}
// azureClient represents multiple Azure Resource Manager providers.
type azureClient struct {
nic *armnetwork.InterfacesClient
@ -222,14 +230,17 @@ type azureClient struct {
logger log.Logger
}
var _ client = &azureClient{}
// createAzureClient is a helper function for creating an Azure compute client to ARM.
func createAzureClient(cfg SDConfig) (azureClient, error) {
func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
var c azureClient
c.logger = logger
telemetry := policy.TelemetryOptions{
ApplicationID: userAgent,
@ -240,12 +251,12 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
Telemetry: telemetry,
})
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
options := &arm.ClientOptions{
ClientOptions: policy.ClientOptions{
@ -257,25 +268,25 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
if err != nil {
return azureClient{}, err
return &azureClient{}, err
}
return c, nil
return &c, nil
}
func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
@ -341,12 +352,11 @@ func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, erro
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
client, err := createAzureClient(*d.cfg)
client, err := createAzureClient(*d.cfg, d.logger)
if err != nil {
d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("could not create Azure client: %w", err)
}
client.logger = d.logger
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
if err != nil {
@ -385,10 +395,32 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
for _, vm := range machines {
go func(vm virtualMachine) {
defer wg.Done()
labelSet, err := d.vmToLabelSet(ctx, client, vm)
ch <- target{labelSet: labelSet, err: err}
}(vm)
}
wg.Wait()
close(ch)
var tg targetgroup.Group
for tgt := range ch {
if tgt.err != nil {
d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
}
if tgt.labelSet != nil {
tg.Targets = append(tg.Targets, tgt.labelSet)
}
}
return []*targetgroup.Group{&tg}, nil
}
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
r, err := newAzureResourceFromID(vm.ID, d.logger)
if err != nil {
ch <- target{labelSet: nil, err: err}
return
return nil, err
}
labels := model.LabelSet{
@ -424,16 +456,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
} else {
networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
}
if err != nil {
if errors.Is(err, errorNotFound) {
level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
} else {
ch <- target{labelSet: nil, err: err}
return nil, err
}
// Get out of this routine because we cannot continue without a network interface.
return
return nil, nil
}
// Continue processing with the network interface
@ -450,7 +480,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
// is a cheap and easy way to determine if a machine is allocated or not.
if networkInterface.Properties.Primary == nil {
level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
return
return nil, nil
}
if *networkInterface.Properties.Primary {
@ -464,35 +494,16 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(address)
ch <- target{labelSet: labels, err: nil}
return
return labels, nil
}
// If we made it here, we don't have a private IP, which should be impossible.
// Return an empty target and an error to ensure an all-or-nothing situation.
err = fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
ch <- target{labelSet: nil, err: err}
return
return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
}
}
}
}(vm)
}
wg.Wait()
close(ch)
var tg targetgroup.Group
for tgt := range ch {
if tgt.err != nil {
d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
}
if tgt.labelSet != nil {
tg.Targets = append(tg.Targets, tgt.labelSet)
}
}
return []*targetgroup.Group{&tg}, nil
// TODO: Should we say something at this point?
return nil, nil
}
func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
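The refactor above pulls the per-VM labeling out of `refresh()` into `vmToLabelSet()`, leaving `refresh()` with only the fan-out/collect plumbing. Stripped of the Azure specifics, the concurrency shape is roughly the following sketch (types and names are illustrative):

```golang
package main

import (
	"fmt"
	"sync"
)

type result struct {
	labels map[string]string
	err    error
}

// fanOut mirrors the shape of refresh(): one goroutine per item, results
// funneled through a buffered channel, all-or-nothing error handling.
func fanOut(items []string, work func(string) (map[string]string, error)) ([]map[string]string, error) {
	ch := make(chan result, len(items)) // buffered so workers never block on send
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			labels, err := work(it)
			ch <- result{labels: labels, err: err}
		}(it)
	}
	wg.Wait()
	close(ch)
	var out []map[string]string
	for r := range ch {
		if r.err != nil {
			return nil, r.err // any failure aborts the whole refresh
		}
		if r.labels != nil {
			out = append(out, r.labels)
		}
	}
	return out, nil
}

func main() {
	out, err := fanOut([]string{"vm1", "vm2"}, func(name string) (map[string]string, error) {
		return map[string]string{"machine": name}, nil
	})
	fmt.Println(out, err)
}
```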

View file

@ -14,16 +14,24 @@
package azure
import (
"context"
"fmt"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
cache "github.com/Code-Hex/go-generics-cache"
"github.com/Code-Hex/go-generics-cache/policy/lru"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
goleak.VerifyTestMain(m,
goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
)
}
func TestMapFromVMWithEmptyTags(t *testing.T) {
@ -79,6 +87,91 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
require.Equal(t, expectedVM, actualVM)
}
func TestVMToLabelSet(t *testing.T) {
id := "/subscriptions/00000000-0000-0000-0000-000000000000/test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
ipAddress := "10.20.30.40"
primary := true
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
{
ID: &networkID,
Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
},
},
}
properties := &armcompute.VirtualMachineProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}
testVM := armcompute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
Properties: properties,
}
expectedVM := virtualMachine{
ID: id,
Name: name,
ComputerName: computerName,
Type: vmType,
Location: location,
OsType: "Linux",
Tags: map[string]*string{},
NetworkInterfaces: []string{networkID},
Size: size,
}
actualVM := mapFromVM(testVM)
require.Equal(t, expectedVM, actualVM)
cfg := DefaultSDConfig
d := &Discovery{
cfg: &cfg,
logger: log.NewNopLogger(),
cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
}
network := armnetwork.Interface{
Name: &networkID,
Properties: &armnetwork.InterfacePropertiesFormat{
Primary: &primary,
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
{Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
PrivateIPAddress: &ipAddress,
}},
},
},
}
client := &mockAzureClient{
networkInterface: &network,
}
labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
require.NoError(t, err)
require.Len(t, labelSet, 11)
}
func TestMapFromVMWithEmptyOSType(t *testing.T) {
id := "test"
name := "name"
@ -381,3 +474,35 @@ func TestNewAzureResourceFromID(t *testing.T) {
require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
}
}
type mockAzureClient struct {
networkInterface *armnetwork.Interface
}
var _ client = &mockAzureClient{}
func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
return nil, nil
}
func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
return nil, nil
}
func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
return nil, nil
}
func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
if networkInterfaceID == "" {
return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
}
return m.networkInterface, nil
}
func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
if scaleSetName == "" {
return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
}
return m.networkInterface, nil
}

View file

@ -208,7 +208,6 @@ func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group)
select {
case <-timeout:
t.Fatalf("Expected update but got none")
return
case <-time.After(defaultWait / 10):
if ref.Equal(t.lastReceive()) {
// No update received.

View file

@ -733,7 +733,6 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
t.Helper()
if _, ok := tSets[poolKey]; !ok {
t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
return
}
match := false

View file

@ -733,7 +733,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
t.Helper()
if _, ok := tGroups[key]; !ok {
t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
return
}
match := false
var mergedTargets string

go.mod
View file

@ -74,7 +74,6 @@ require (
go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/net v0.22.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.6.0
@ -186,6 +185,7 @@ require (
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect

View file

@ -349,7 +349,9 @@ func (ls Labels) DropMetricName() Labels {
if i == 0 { // Make common case fast with no allocations.
return ls[1:]
}
return append(ls[:i], ls[i+1:]...)
// Avoid modifying original Labels - use [:i:i] so that left slice would not
// have any spare capacity and append would have to allocate a new slice for the result.
return append(ls[:i:i], ls[i+1:]...)
}
}
return ls
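The three-index slice is the load-bearing detail here: `ls[:i:i]` caps the left operand's capacity at `i`, so `append` cannot write into the original backing array and must allocate. A standalone demonstration of the difference:

```golang
package main

import "fmt"

func main() {
	orig := []string{"a", "b", "c", "d"}
	// Aliased: orig[:2] still has capacity 4, so append overwrites orig[2] in place.
	aliased := append(orig[:2], "X")
	fmt.Println(orig, aliased) // [a b X d] [a b X]

	orig = []string{"a", "b", "c", "d"}
	// Safe: the full slice expression orig[:2:2] caps capacity at 2,
	// forcing append to allocate a new backing array.
	safe := append(orig[:2:2], "X")
	fmt.Println(orig, safe) // [a b c d] [a b X]
}
```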

View file

@ -457,7 +457,11 @@ func TestLabels_Get(t *testing.T) {
func TestLabels_DropMetricName(t *testing.T) {
require.True(t, Equal(FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").DropMetricName()))
require.True(t, Equal(FromStrings("aaa", "111"), FromStrings(MetricName, "myname", "aaa", "111").DropMetricName()))
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222").DropMetricName()))
original := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
check := FromStrings("__aaa__", "111", MetricName, "myname", "bbb", "222")
require.True(t, Equal(FromStrings("__aaa__", "111", "bbb", "222"), check.DropMetricName()))
require.True(t, Equal(original, check))
}
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation

View file

@ -118,3 +118,30 @@ func (m *Matcher) GetRegexString() string {
}
return m.re.GetRegexString()
}
// SetMatches returns a set of equality matchers for the current regex matcher, if possible.
// For example, the regexp `a(b|f)` returns "ab" and "af".
// Returns nil if we can't replace the regexp with only equality matchers.
func (m *Matcher) SetMatches() []string {
if m.re == nil {
return nil
}
return m.re.SetMatches()
}
// Prefix returns the required prefix of the value to match, if possible.
// It will be empty if it's an equality matcher or if the prefix can't be determined.
func (m *Matcher) Prefix() string {
if m.re == nil {
return ""
}
return m.re.prefix
}
// IsRegexOptimized returns whether regex is optimized.
func (m *Matcher) IsRegexOptimized() bool {
if m.re == nil {
return false
}
return m.re.IsOptimized()
}
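A hedged sketch of how the new accessors behave, based on the doc comments above and the tests further down (TestPrefix, TestIsRegexOptimized); the label name and patterns are illustrative:

```golang
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	m, err := labels.NewMatcher(labels.MatchRegexp, "job", "api|web")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.SetMatches())       // [api web]: the alternation is a pure set match
	fmt.Println(m.IsRegexOptimized()) // true: a fast path replaces the regex engine

	p, _ := labels.NewMatcher(labels.MatchRegexp, "job", "abc.+")
	fmt.Println(p.Prefix()) // abc: literal prefix extracted from the pattern
}
```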

View file

@ -14,13 +14,14 @@
package labels
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
m, err := NewMatcher(mType, "", value)
m, err := NewMatcher(mType, "test_label_name", value)
require.NoError(t, err)
return m
}
@ -81,6 +82,21 @@ func TestMatcher(t *testing.T) {
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "$*bar"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "bar^+"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "$+bar"),
value: "foo-bar",
match: false,
},
}
for _, test := range tests {
@ -118,6 +134,82 @@ func TestInverse(t *testing.T) {
}
}
func TestPrefix(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
prefix string
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchNotEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchNotRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc(def|ghj)|ab|a."),
prefix: "a",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "foo.+bar|foo.*baz"),
prefix: "foo",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|.*"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|def"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, ".+def"),
prefix: "",
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.prefix, tc.matcher.Prefix())
})
}
}
func TestIsRegexOptimized(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
isRegexOptimized bool
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "."),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
isRegexOptimized: true,
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.isRegexOptimized, tc.matcher.IsRegexOptimized())
})
}
}
func BenchmarkMatchType_String(b *testing.B) {
for i := 0; i <= b.N; i++ {
_ = MatchType(i % int(MatchNotRegexp+1)).String()

View file

@ -14,51 +14,90 @@
package labels
import (
"slices"
"strings"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
)
const (
maxSetMatches = 256
// The minimum number of alternate values a regex should have to trigger
// the optimization done by optimizeEqualStringMatchers(), so that a map
// is used to match values instead of iterating over a list. This value
// was computed by running BenchmarkOptimizeEqualStringMatchers.
minEqualMultiStringMatcherMapThreshold = 16
)
type FastRegexMatcher struct {
// Under some conditions, re is nil because the expression is never parsed.
// We store the original string to be able to return it in GetRegexString().
reString string
re *regexp.Regexp
setMatches []string
stringMatcher StringMatcher
prefix string
suffix string
contains string
// shortcut for literals
literal bool
value string
// matchString is the "compiled" function to run by MatchString().
matchString func(string) bool
}
func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if isLiteral(v) {
return &FastRegexMatcher{literal: true, value: v}, nil
}
re, err := regexp.Compile("^(?:" + v + ")$")
if err != nil {
return nil, err
m := &FastRegexMatcher{
reString: v,
}
m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v)
if m.stringMatcher != nil {
// If we already have a string matcher, we don't need to parse the regex
// or compile the matchString function. This also avoids the behavior in
// compileMatchStringFunction where it prefers to use setMatches when
// available, even if the string matcher is faster.
m.matchString = m.stringMatcher.Matches
} else {
parsed, err := syntax.Parse(v, syntax.Perl)
if err != nil {
return nil, err
}
m := &FastRegexMatcher{
re: re,
// Simplify the syntax tree to run faster.
parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$")
if err != nil {
return nil, err
}
if parsed.Op == syntax.OpConcat {
m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
}
if matches, caseSensitive := findSetMatches(parsed); caseSensitive {
m.setMatches = matches
}
m.stringMatcher = stringMatcherFromRegexp(parsed)
m.matchString = m.compileMatchStringFunction()
}
return m, nil
}
func (m *FastRegexMatcher) MatchString(s string) bool {
if m.literal {
return s == m.value
// compileMatchStringFunction returns the function to run by MatchString().
func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
// If the only optimization available is the string matcher, then we can just run it.
if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
return m.stringMatcher.Matches
}
return func(s string) bool {
if len(m.setMatches) != 0 {
for _, match := range m.setMatches {
if match == s {
return true
}
}
return false
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
return false
@ -69,18 +108,254 @@ func (m *FastRegexMatcher) MatchString(s string) bool {
if m.contains != "" && !strings.Contains(s, m.contains) {
return false
}
if m.stringMatcher != nil {
return m.stringMatcher.Matches(s)
}
return m.re.MatchString(s)
}
}
// IsOptimized returns true if any fast-path optimization is applied to the
// regex matcher.
func (m *FastRegexMatcher) IsOptimized() bool {
return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
}
// findSetMatches extracts equality matches from a regexp.
// Returns nil if we can't replace the regexp with only equality matchers, or if the regexp
// contains a mix of case-sensitive and case-insensitive matchers.
func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) {
clearBeginEndText(re)
return findSetMatchesInternal(re, "")
}
func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) {
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil, false
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil, false
case syntax.OpLiteral:
return []string{base + string(re.Rune)}, isCaseSensitive(re)
case syntax.OpEmptyMatch:
if base != "" {
return []string{base}, isCaseSensitive(re)
}
case syntax.OpAlternate:
return findSetMatchesFromAlternate(re, base)
case syntax.OpCapture:
clearCapture(re)
return findSetMatchesInternal(re, base)
case syntax.OpConcat:
return findSetMatchesFromConcat(re, base)
case syntax.OpCharClass:
if len(re.Rune)%2 != 0 {
return nil, false
}
var matches []string
var totalSet int
for i := 0; i+1 < len(re.Rune); i += 2 {
totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1
}
// Limit the total number of characters that can be used to create matches.
// In some cases, like the negation [^0-9], a lot of possibilities exist, and that
// can create thousands of possible matches, at which point we're better off using the regexp engine.
if totalSet > maxSetMatches {
return nil, false
}
for i := 0; i+1 < len(re.Rune); i += 2 {
lo, hi := re.Rune[i], re.Rune[i+1]
for c := lo; c <= hi; c++ {
matches = append(matches, base+string(c))
}
}
return matches, isCaseSensitive(re)
default:
return nil, false
}
return nil, false
}
func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
if len(re.Sub) == 0 {
return nil, false
}
clearCapture(re.Sub...)
matches = []string{base}
for i := 0; i < len(re.Sub); i++ {
var newMatches []string
for j, b := range matches {
m, caseSensitive := findSetMatchesInternal(re.Sub[i], b)
if m == nil {
return nil, false
}
if tooManyMatches(newMatches, m...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 && j == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
newMatches = append(newMatches, m...)
}
matches = newMatches
}
return matches, matchesCaseSensitive
}
func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
for i, sub := range re.Sub {
found, caseSensitive := findSetMatchesInternal(sub, base)
if found == nil {
return nil, false
}
if tooManyMatches(matches, found...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
matches = append(matches, found...)
}
return matches, matchesCaseSensitive
}
// clearCapture removes capture operations, as they are not used for matching.
func clearCapture(regs ...*syntax.Regexp) {
for _, r := range regs {
// Iterate on the regexp because capture groups could be nested.
for r.Op == syntax.OpCapture {
*r = *r.Sub[0]
}
}
}
// clearBeginEndText removes the begin and end text operators from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
func clearBeginEndText(re *syntax.Regexp) {
// Do not clear begin/end text from an alternate operator because it could
// change the actual regexp properties.
if re.Op == syntax.OpAlternate {
return
}
if len(re.Sub) == 0 {
return
}
if len(re.Sub) == 1 {
if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
// We need to remove this element. Since it's the only one, we convert into a matcher of an empty string.
// OpEmptyMatch is regexp's nop operator.
re.Op = syntax.OpEmptyMatch
re.Sub = nil
return
}
}
if re.Sub[0].Op == syntax.OpBeginText {
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
re.Sub = re.Sub[:len(re.Sub)-1]
}
}
// isCaseInsensitive tells whether a regexp is case insensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseInsensitive(reg *syntax.Regexp) bool {
return (reg.Flags & syntax.FoldCase) != 0
}
// isCaseSensitive tells whether a regexp is case sensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseSensitive(reg *syntax.Regexp) bool {
return !isCaseInsensitive(reg)
}
// tooManyMatches guards against creating too many set matches.
func tooManyMatches(matches []string, added ...string) bool {
return len(matches)+len(added) > maxSetMatches
}
func (m *FastRegexMatcher) MatchString(s string) bool {
return m.matchString(s)
}
func (m *FastRegexMatcher) SetMatches() []string {
// IMPORTANT: always return a copy; otherwise, if the caller manipulates this slice, it will
// also be manipulated in the cached FastRegexMatcher instance.
return slices.Clone(m.setMatches)
}
func (m *FastRegexMatcher) GetRegexString() string {
if m.literal {
return m.value
}
return m.re.String()
return m.reString
}
func isLiteral(re string) bool {
return regexp.QuoteMeta(re) == re
// optimizeAlternatingLiterals optimizes a regex of the form
//
// `literal1|literal2|literal3|...`
//
// This function returns an optimized StringMatcher (or nil if the regex
// cannot be optimized in this way) and a list of up to maxSetMatches set matches.
func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
if len(s) == 0 {
return emptyStringMatcher{}, nil
}
estimatedAlternates := strings.Count(s, "|") + 1
// If there are no alternates, check if the string is a literal
if estimatedAlternates == 1 {
if regexp.QuoteMeta(s) == s {
return &equalStringMatcher{s: s, caseSensitive: true}, []string{s}
}
return nil, nil
}
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
// Split the string into the next literal and the remainder
subMatch := s[:end]
s = s[end+1:]
// Return early if any of the sub-matches is not a literal.
if regexp.QuoteMeta(subMatch) != subMatch {
return nil, nil
}
multiMatcher.add(subMatch)
}
// Return early if the remainder is not a literal.
if regexp.QuoteMeta(s) != s {
return nil, nil
}
multiMatcher.add(s)
return multiMatcher, multiMatcher.setMatches()
}
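In short, `optimizeAlternatingLiterals` only succeeds when every `|`-separated chunk is a literal. A few illustrative inputs and their outcomes, read directly from the code above:

```golang
// optimizeAlternatingLiterals("")            -> emptyStringMatcher{}, nil
// optimizeAlternatingLiterals("foo")         -> equalStringMatcher{s: "foo"}, ["foo"]
// optimizeAlternatingLiterals("foo|bar|baz") -> multi-string matcher, ["foo", "bar", "baz"]
// optimizeAlternatingLiterals("foo|ba.r")    -> nil, nil ("ba.r" is not a literal)
```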
// optimizeConcatRegex returns literal prefix/suffix text that can be safely
@ -123,3 +398,540 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
return
}
// StringMatcher is a matcher that matches a string in place of a regular expression.
type StringMatcher interface {
Matches(s string) bool
}
// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
// It returns nil if the regexp is not supported.
func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
clearBeginEndText(re)
m := stringMatcherFromRegexpInternal(re)
m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
return m
}
func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
clearCapture(re)
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil
case syntax.OpPlus:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
return &anyNonEmptyStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpStar:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
// If the newline is valid, then this matcher literally matches any string (even an empty one).
if re.Sub[0].Op == syntax.OpAnyChar {
return trueMatcher{}
}
// Any string is fine (including an empty one), as long as it doesn't contain a newline.
return anyStringWithoutNewlineMatcher{}
case syntax.OpQuest:
// Only optimize for ".?".
if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
return nil
}
return &zeroOrOneCharacterStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpEmptyMatch:
return emptyStringMatcher{}
case syntax.OpLiteral:
return &equalStringMatcher{
s: string(re.Rune),
caseSensitive: !isCaseInsensitive(re),
}
case syntax.OpAlternate:
or := make([]StringMatcher, 0, len(re.Sub))
for _, sub := range re.Sub {
m := stringMatcherFromRegexpInternal(sub)
if m == nil {
return nil
}
or = append(or, m)
}
return orStringMatcher(or)
case syntax.OpConcat:
clearCapture(re.Sub...)
if len(re.Sub) == 0 {
return emptyStringMatcher{}
}
if len(re.Sub) == 1 {
return stringMatcherFromRegexpInternal(re.Sub[0])
}
var left, right StringMatcher
// Let's try to find if there's a first and last any matchers.
if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left == nil {
return nil
}
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
if right == nil {
return nil
}
re.Sub = re.Sub[:len(re.Sub)-1]
}
matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
if len(matches) == 0 && len(re.Sub) == 2 {
// We have not found fixed set matches. Look for other known cases that
// we can optimize.
switch {
// Prefix is literal.
case right == nil && re.Sub[0].Op == syntax.OpLiteral:
right = stringMatcherFromRegexpInternal(re.Sub[1])
if right != nil {
matches = []string{string(re.Sub[0].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
}
// Suffix is literal.
case left == nil && re.Sub[1].Op == syntax.OpLiteral:
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left != nil {
matches = []string{string(re.Sub[1].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
}
}
}
// Ensure we've found some literals to match (optionally with a left and/or right matcher).
// If not, then this optimization doesn't trigger.
if len(matches) == 0 {
return nil
}
// Use the right (and best) matcher based on what we've found.
switch {
// No left and right matchers (only fixed set matches).
case left == nil && right == nil:
// If there are no any-matchers on either side, it's a concatenation of literals.
or := make([]StringMatcher, 0, len(matches))
for _, match := range matches {
or = append(or, &equalStringMatcher{
s: match,
caseSensitive: matchesCaseSensitive,
})
}
return orStringMatcher(or)
// Right matcher with 1 fixed set match.
case left == nil && len(matches) == 1:
return &literalPrefixStringMatcher{
prefix: matches[0],
prefixCaseSensitive: matchesCaseSensitive,
right: right,
}
// Left matcher with 1 fixed set match.
case right == nil && len(matches) == 1:
return &literalSuffixStringMatcher{
left: left,
suffix: matches[0],
suffixCaseSensitive: matchesCaseSensitive,
}
// We found literals in the middle. We can trigger the fast path only if
// the matches are case sensitive because containsStringMatcher doesn't
// support case-insensitive matching.
case matchesCaseSensitive:
return &containsStringMatcher{
substrings: matches,
left: left,
right: right,
}
}
}
return nil
}
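Reading the switch above, the common wildcard patterns map to dedicated matchers. A summary sketch (Prometheus matchers are fully anchored, and without the `s` flag `.` parses as OpAnyCharNotNL):

```golang
// ".*"     -> anyStringWithoutNewlineMatcher{}  (any string without a newline)
// "(?s).*" -> trueMatcher{}                     (any string, newlines included)
// ".+"     -> anyNonEmptyStringMatcher{matchNL: false}
// ".?"     -> zeroOrOneCharacterStringMatcher{matchNL: false}
// ""       -> emptyStringMatcher{}
// "foo"    -> equalStringMatcher{s: "foo", caseSensitive: true}
// "a|b"    -> orStringMatcher of equalStringMatchers (folded into a map-based
//             matcher by optimizeEqualStringMatchers above the threshold)
```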
// containsStringMatcher matches a string if it contains any of the substrings.
// If left and right are not nil, it's a contains operation where left and right must match.
// If left is nil, it's a hasPrefix operation and right must match.
// Finally, if right is nil it's a hasSuffix operation and left must match.
type containsStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
// At least one of these strings must match in the "middle", between left and right matchers.
substrings []string
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *containsStringMatcher) Matches(s string) bool {
for _, substr := range m.substrings {
switch {
case m.right != nil && m.left != nil:
searchStartPos := 0
for {
pos := strings.Index(s[searchStartPos:], substr)
if pos < 0 {
break
}
// Since we started searching from searchStartPos, we have to add that offset
// to get the actual position of the substring inside the text.
pos += searchStartPos
// If both the left and right matchers match, then we can stop searching because
// we've found a match.
if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) {
return true
}
// Continue searching for another occurrence of the substring inside the text.
searchStartPos = pos + 1
}
case m.left != nil:
// If we have to check for characters on the left then we need to match a suffix.
if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) {
return true
}
case m.right != nil:
if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) {
return true
}
}
}
return false
}
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
type literalPrefixStringMatcher struct {
prefix string
prefixCaseSensitive bool
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *literalPrefixStringMatcher) Matches(s string) bool {
// Ensure the prefix matches.
if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
return false
}
if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
return false
}
// Ensure the right side matches.
return m.right.Matches(s[len(m.prefix):])
}
// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
type literalSuffixStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
suffix string
suffixCaseSensitive bool
}
func (m *literalSuffixStringMatcher) Matches(s string) bool {
// Ensure the suffix matches.
if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
return false
}
if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
return false
}
// Ensure the left side matches.
return m.left.Matches(s[:len(s)-len(m.suffix)])
}
// emptyStringMatcher matches an empty string.
type emptyStringMatcher struct{}
func (m emptyStringMatcher) Matches(s string) bool {
return len(s) == 0
}
// orStringMatcher matches any of the sub-matchers.
type orStringMatcher []StringMatcher
func (m orStringMatcher) Matches(s string) bool {
for _, matcher := range m {
if matcher.Matches(s) {
return true
}
}
return false
}
// equalStringMatcher matches a string exactly and supports case-insensitive matching.
type equalStringMatcher struct {
s string
caseSensitive bool
}
func (m *equalStringMatcher) Matches(s string) bool {
if m.caseSensitive {
return m.s == s
}
return strings.EqualFold(m.s, s)
}
type multiStringMatcherBuilder interface {
StringMatcher
add(s string)
setMatches() []string
}
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
// If the estimated size is low enough, it's faster to use a slice instead of a map.
if estimatedSize < minEqualMultiStringMatcherMapThreshold {
return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
}
return &equalMultiStringMapMatcher{
values: make(map[string]struct{}, estimatedSize),
caseSensitive: caseSensitive,
}
}
// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
type equalMultiStringSliceMatcher struct {
values []string
caseSensitive bool
}
func (m *equalMultiStringSliceMatcher) add(s string) {
m.values = append(m.values, s)
}
func (m *equalMultiStringSliceMatcher) setMatches() []string {
return m.values
}
func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
if m.caseSensitive {
for _, v := range m.values {
if s == v {
return true
}
}
} else {
for _, v := range m.values {
if strings.EqualFold(s, v) {
return true
}
}
}
return false
}
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
type equalMultiStringMapMatcher struct {
// values contains values to match a string against. If the matching is case insensitive,
// the values here must be lowercase.
values map[string]struct{}
caseSensitive bool
}
func (m *equalMultiStringMapMatcher) add(s string) {
if !m.caseSensitive {
s = strings.ToLower(s)
}
m.values[s] = struct{}{}
}
func (m *equalMultiStringMapMatcher) setMatches() []string {
if len(m.values) >= maxSetMatches {
return nil
}
matches := make([]string, 0, len(m.values))
for s := range m.values {
matches = append(matches, s)
}
return matches
}
func (m *equalMultiStringMapMatcher) Matches(s string) bool {
if !m.caseSensitive {
s = strings.ToLower(s)
}
_, ok := m.values[s]
return ok
}
// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
// (including an empty one) as long as it doesn't contain a newline character.
type anyStringWithoutNewlineMatcher struct{}
func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
// We need to make sure it doesn't contain a newline. Since the newline is
// an ASCII character, we can use strings.IndexByte().
return strings.IndexByte(s, '\n') == -1
}
// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string.
type anyNonEmptyStringMatcher struct {
matchNL bool
}
func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
if m.matchNL {
// It's OK if the string contains a newline so we just need to make
// sure it's non-empty.
return len(s) > 0
}
// We need to make sure it's non-empty and doesn't contain a newline.
// Since the newline is an ASCII character, we can use strings.IndexByte().
return len(s) > 0 && strings.IndexByte(s, '\n') == -1
}
// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
// of any character. The newline character is matched only if matchNL is set to true.
type zeroOrOneCharacterStringMatcher struct {
matchNL bool
}
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
// Zero or one.
if len(s) > 1 {
return false
}
// No need to check for the newline if the string is empty or matching a newline is OK.
if m.matchNL || len(s) == 0 {
return true
}
return s[0] != '\n'
}
// trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{}
func (m trueMatcher) Matches(_ string) bool {
return true
}
// optimizeEqualStringMatchers optimizes a specific case where all matchers are made of an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
// this specific case, when we have many strings to match against we can use a map instead
// of iterating over the list of strings.
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
var (
caseSensitive bool
caseSensitiveSet bool
numValues int
)
// Analyse the input StringMatcher to count the number of occurrences
// and ensure all of them have the same case sensitivity.
analyseCallback := func(matcher *equalStringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
return false
} else if !caseSensitiveSet {
caseSensitive = matcher.caseSensitive
caseSensitiveSet = true
}
numValues++
return true
}
if !findEqualStringMatchers(input, analyseCallback) {
return input
}
// If the number of values found is less than the threshold, then we should skip the optimization.
if numValues < threshold {
return input
}
// Parse the input StringMatcher again to extract and store all values.
// We can skip the case-sensitivity check because we've already checked it;
// if the code reaches this point, all matchers have the same case sensitivity.
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
// Ignore the return value because we already iterated over the input StringMatcher
// and it was all good.
findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
multiMatcher.add(matcher.s)
return true
})
return multiMatcher
}
// findEqualStringMatchers analyzes the input StringMatcher and calls the callback for each
// equalStringMatcher found. Returns true if and only if the input StringMatcher is composed
// *only* of an alternation of equalStringMatchers.
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
orInput, ok := input.(orStringMatcher)
if !ok {
return false
}
for _, m := range orInput {
switch casted := m.(type) {
case orStringMatcher:
if !findEqualStringMatchers(m, callback) {
return false
}
case *equalStringMatcher:
if !callback(casted) {
return false
}
default:
// It's not an equal string matcher, so we have to stop searching
// because this optimization can't be applied.
return false
}
}
return true
}
func hasPrefixCaseInsensitive(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
func hasSuffixCaseInsensitive(s, suffix string) bool {
return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
}

File diff suppressed because one or more lines are too long

View file

@ -3212,6 +3212,24 @@ func TestRangeQuery(t *testing.T) {
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
{
Name: "drop-metric-name",
Load: `load 30s
requests{job="1", __address__="bar"} 100`,
Query: `requests * 2`,
Result: Matrix{
Series{
Floats: []FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}},
Metric: labels.FromStrings(
"__address__", "bar",
"job", "1",
),
},
},
Start: time.Unix(0, 0),
End: time.Unix(120, 0),
Interval: 1 * time.Minute,
},
}
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
@ -3496,7 +3514,39 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) {
},
NegativeBuckets: []int64{1, 0},
},
stdVar: 1544.8582535368798, // actual variance: 1738.4082
stdVar: 1844.4651144196398, // actual variance: 1738.4082
},
{
name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -112946,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 3},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 1},
{Offset: 2, Length: 1},
},
NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0},
},
stdVar: 759352122.1939945, // actual variance: 882690990
},
{
name: "-10 x10",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -100,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 4, Length: 1},
},
NegativeBuckets: []int64{10},
},
stdVar: 1.725830020304794, // actual variance: 0
},
{
name: "-50, -8, 0, 3, 8, 9, 100, NaN",

View file

@ -1154,11 +1154,17 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator()
for it.Next() {
bucket := it.At()
if bucket.Count == 0 {
continue
}
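// Estimate each bucket's contribution at the geometric mean of its bounds,
// negated for negative buckets; a bucket straddling zero contributes 0.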
var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0
} else {
val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
}
delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
@ -1187,11 +1193,17 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator()
for it.Next() {
bucket := it.At()
if bucket.Count == 0 {
continue
}
var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0
} else {
val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
}
delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
@ -1429,6 +1441,9 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot
}
srcLabels[i-3] = src
}
if !model.LabelName(dst).IsValid() {
panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst))
}
val, ws := ev.eval(args[0])
matrix := val.(Matrix)

View file

@ -3706,7 +3706,31 @@ func TestParseExpressions(t *testing.T) {
if !test.fail {
require.NoError(t, err)
require.Equal(t, test.expected, expr, "error on input '%s'", test.input)
expected := test.expected
// The FastRegexMatcher is not comparable with a deep equal, so only compare its String() version.
if actualVector, ok := expr.(*VectorSelector); ok {
require.IsType(t, &VectorSelector{}, test.expected, "error on input '%s'", test.input)
expectedVector := test.expected.(*VectorSelector)
require.Len(t, actualVector.LabelMatchers, len(expectedVector.LabelMatchers), "error on input '%s'", test.input)
for i := 0; i < len(actualVector.LabelMatchers); i++ {
expectedMatcher := expectedVector.LabelMatchers[i].String()
actualMatcher := actualVector.LabelMatchers[i].String()
require.Equal(t, expectedMatcher, actualMatcher, "unexpected label matcher '%s' on input '%s'", actualMatcher, test.input)
}
// Make a shallow copy of the expected expr (because the test cases are defined in a global variable)
// and then reset the LabelMatchers so they are not compared in the following deep equal.
expectedCopy := *expectedVector
expectedCopy.LabelMatchers = nil
expected = &expectedCopy
actualVector.LabelMatchers = nil
}
require.Equal(t, expected, expr, "error on input '%s'", test.input)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())

View file

@ -46,6 +46,7 @@ var (
patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
)
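For reference, a test script using the new range form might look like the following sketch (inferred from patEvalRange and the parsing code below; the metric name and values are illustrative):

```
load 5m
	some_metric{env="prod"} 0+5x10

eval range from 0 to 10m step 5m some_metric
	some_metric{env="prod"} 0 5 10
```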
const (
@ -72,7 +73,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
}
// RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine engineQuerier) {
func RunBuiltinTests(t *testing.T, engine QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true
@ -89,11 +90,19 @@ func RunBuiltinTests(t *testing.T, engine engineQuerier) {
}
// RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine engineQuerier) {
test, err := newTest(t, input)
require.NoError(t, err)
func RunTest(t testutil.T, input string, engine QueryEngine) {
require.NoError(t, runTest(t, input, engine))
}
func runTest(t testutil.T, input string, engine QueryEngine) error {
test, err := newTest(t, input)
// Why do this before checking err? newTest() can create the test storage and then return an error,
// and we want to make sure to clean that up to avoid leaking goroutines.
defer func() {
if test == nil {
return
}
if test.storage != nil {
test.storage.Close()
}
@ -102,13 +111,21 @@ func RunTest(t testutil.T, input string, engine engineQuerier) {
}
}()
if err != nil {
return err
}
for _, cmd := range test.cmds {
if err := test.exec(cmd, engine); err != nil {
// TODO(fabxc): aggregate command errors, yield diffs for result
// comparison errors.
require.NoError(t, test.exec(cmd, engine))
return err
}
}
return nil
}
// test is a sequence of read and write commands that are run
// against a test storage.
type test struct {
@ -137,11 +154,6 @@ func newTest(t testutil.T, input string) (*test, error) {
//go:embed testdata
var testsFs embed.FS
type engineQuerier interface {
NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
}
func raise(line int, format string, v ...interface{}) error {
return &parser.ParseErr{
LineOffset: line,
@ -188,15 +200,26 @@ func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValu
}
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
if !patEvalInstant.MatchString(lines[i]) {
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>")
instantParts := patEvalInstant.FindStringSubmatch(lines[i])
rangeParts := patEvalRange.FindStringSubmatch(lines[i])
if instantParts == nil && rangeParts == nil {
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
}
parts := patEvalInstant.FindStringSubmatch(lines[i])
var (
mod = parts[1]
at = parts[2]
expr = parts[3]
)
isInstant := instantParts != nil
var mod string
var expr string
if isInstant {
mod = instantParts[1]
expr = instantParts[3]
} else {
mod = rangeParts[1]
expr = rangeParts[5]
}
_, err := parser.ParseExpr(expr)
if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
@ -209,15 +232,54 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err
}
formatErr := func(format string, args ...any) error {
combinedArgs := []any{expr, i + 1}
combinedArgs = append(combinedArgs, args...)
return fmt.Errorf("error in eval %s (line %v): "+format, combinedArgs...)
}
var cmd *evalCmd
if isInstant {
at := instantParts[2]
offset, err := model.ParseDuration(at)
if err != nil {
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
}
ts := testStartTime.Add(time.Duration(offset))
cmd = newInstantEvalCmd(expr, ts, i+1)
} else {
from := rangeParts[2]
to := rangeParts[3]
step := rangeParts[4]
parsedFrom, err := model.ParseDuration(from)
if err != nil {
return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
}
parsedTo, err := model.ParseDuration(to)
if err != nil {
return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
}
if parsedTo < parsedFrom {
return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
}
parsedStep, err := model.ParseDuration(step)
if err != nil {
return i, nil, formatErr("invalid step definition %q: %s", step, err)
}
cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
}
cmd := newEvalCmd(expr, ts, i+1)
switch mod {
case "ordered":
// Ordered results are not supported for range queries, but the regex for range query commands does not allow
// asserting an ordered result, so we don't need to do any error checking here.
cmd.ordered = true
case "fail":
cmd.fail = true
@ -240,8 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
}
// Currently, we are not expecting any matrices.
if len(vals) > 1 {
return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
if len(vals) > 1 && isInstant {
return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
}
cmd.expectMetric(j, metric, vals...)
}
@ -375,8 +437,11 @@ func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
type evalCmd struct {
expr string
start time.Time
end time.Time
step time.Duration
line int
isRange bool // if false, instant query
fail, ordered bool
metrics map[uint64]labels.Labels
@ -392,7 +457,7 @@ func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals)
}
func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
@ -403,6 +468,20 @@ func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
}
}
func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
end: end,
step: step,
line: line,
isRange: true,
metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{},
}
}
func (ev *evalCmd) String() string {
return "eval"
}
@ -425,7 +504,77 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case Matrix:
return errors.New("received range result on instant evaluation")
if ev.ordered {
return fmt.Errorf("expected ordered result, but query returned a matrix")
}
if err := assertMatrixSorted(val); err != nil {
return err
}
seen := map[uint64]bool{}
for _, s := range val {
hash := s.Metric.Hash()
if _, ok := ev.metrics[hash]; !ok {
return fmt.Errorf("unexpected metric %s in result", s.Metric)
}
seen[hash] = true
exp := ev.expected[hash]
var expectedFloats []FPoint
var expectedHistograms []HPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
if ts.After(ev.end) {
return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
}
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
}
}
if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
}
for i, expected := range expectedFloats {
actual := s.Floats[i]
if expected.T != actual.T {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !almostEqual(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
for i, expected := range expectedHistograms {
actual := s.Histograms[i]
if expected.T != actual.T {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !actual.H.Equals(expected.H) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
}
}
}
for hash := range ev.expected {
if !seen[hash] {
return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
}
}
case Vector:
seen := map[uint64]bool{}
@@ -440,7 +589,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
}
exp0 := exp.vals[0]
expH := exp0.Histogram
if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) {
if expH == nil && v.H != nil {
return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
}
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
if expH != nil && !expH.Equals(v.H) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
@@ -477,6 +632,21 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil
}
func formatSeriesResult(s Series) string {
floatPlural := "s"
histogramPlural := "s"
if len(s.Floats) == 1 {
floatPlural = ""
}
if len(s.Histograms) == 1 {
histogramPlural = ""
}
return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms)
}
// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
func HistogramTestExpression(h *histogram.FloatHistogram) string {
if h != nil {
@@ -561,7 +731,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
}
// exec processes a single step of the test.
func (t *test) exec(tc testCommand, engine engineQuerier) error {
func (t *test) exec(tc testCommand, engine QueryEngine) error {
switch cmd := tc.(type) {
case *clearCmd:
t.clear()
@@ -578,6 +748,48 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
}
case *evalCmd:
return t.execEval(cmd, engine)
default:
panic("promql.Test.exec: unknown test command type")
}
return nil
}
func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
}
return t.execInstantEval(cmd, engine)
}
func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
if err != nil {
return err
}
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
return nil
}
return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
}
return nil
}
func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
queries, err := atModifierTestCases(cmd.expr, cmd.start)
if err != nil {
return err
@@ -616,10 +828,14 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
}
defer q.Close()
if cmd.ordered {
// Ordering isn't defined for range queries.
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
continue
}
mat := rangeRes.Value.(Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err
}
vec := make(Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
@@ -644,12 +860,25 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
}
default:
panic("promql.Test.exec: unknown test command type")
return nil
}
func assertMatrixSorted(m Matrix) error {
if len(m) <= 1 {
return nil
}
for i, s := range m[:len(m)-1] {
nextIndex := i + 1
nextMetric := m[nextIndex].Metric
if labels.Compare(s.Metric, nextMetric) > 0 {
return fmt.Errorf("matrix results should always be sorted by labels, but matrix is not sorted: series at index %v with labels %s sorts before series at index %v with labels %s", nextIndex, nextMetric, i, s.Metric)
}
}
return nil
}
@@ -704,8 +933,6 @@ func parseNumber(s string) (float64, error) {
// LazyLoader lazily loads samples into storage.
// This is specifically implemented for unit testing of rules.
type LazyLoader struct {
testutil.T
loadCmd *loadCmd
storage storage.Storage
@@ -727,13 +954,15 @@ type LazyLoaderOpts struct {
}
// NewLazyLoader returns an initialized empty LazyLoader.
func NewLazyLoader(t testutil.T, input string, opts LazyLoaderOpts) (*LazyLoader, error) {
func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
ll := &LazyLoader{
T: t,
opts: opts,
}
err := ll.parse(input)
ll.clear()
if err != nil {
return nil, err
}
err = ll.clear()
return ll, err
}
@@ -761,15 +990,20 @@ func (ll *LazyLoader) parse(input string) error {
}
// clear the current test storage of all inserted samples.
func (ll *LazyLoader) clear() {
func (ll *LazyLoader) clear() error {
if ll.storage != nil {
err := ll.storage.Close()
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
if err := ll.storage.Close(); err != nil {
return fmt.Errorf("closing test storage: %w", err)
}
}
if ll.cancelCtx != nil {
ll.cancelCtx()
}
ll.storage = teststorage.New(ll)
var err error
ll.storage, err = teststorage.NewWithError()
if err != nil {
return err
}
opts := EngineOpts{
Logger: nil,
@@ -783,6 +1017,7 @@ func (ll *LazyLoader) clear() {
ll.queryEngine = NewEngine(opts)
ll.context, ll.cancelCtx = context.WithCancel(context.Background())
return nil
}
// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
@@ -836,8 +1071,7 @@ func (ll *LazyLoader) Storage() storage.Storage {
}
// Close closes resources associated with the LazyLoader.
func (ll *LazyLoader) Close() {
func (ll *LazyLoader) Close() error {
ll.cancelCtx()
err := ll.storage.Close()
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
return ll.storage.Close()
}
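
With `testutil.T` gone from its API, the `LazyLoader` now surfaces setup and teardown failures as ordinary errors. A minimal usage sketch under the new signatures (the load string and wrapper function are illustrative, not part of this change):

```golang
package main

import (
	"log"

	"github.com/prometheus/prometheus/promql"
)

func run() error {
	// NewLazyLoader no longer takes a testutil.T; setup failures are returned as errors.
	suite, err := promql.NewLazyLoader("load 10s\n  metric 1+1x5", promql.LazyLoaderOpts{})
	if err != nil {
		return err
	}
	defer func() {
		// Close now returns storage teardown errors instead of failing a testing.T.
		if cerr := suite.Close(); cerr != nil {
			log.Println("closing lazy loader:", cerr)
		}
	}()
	// Run queries against suite.Storage() here.
	return nil
}

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}
```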

View file

@@ -110,7 +110,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
}
for _, c := range cases {
suite, err := NewLazyLoader(t, c.loadString, LazyLoaderOpts{})
suite, err := NewLazyLoader(c.loadString, LazyLoaderOpts{})
require.NoError(t, err)
defer suite.Close()
@@ -156,3 +156,354 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
}
}
}
func TestRunTest(t *testing.T) {
testData := `
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
`
testCases := map[string]struct {
input string
expectedError string
}{
"instant query with expected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
`,
},
"instant query with unexpected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 80
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 80 for {group="canary"} but got 70`,
},
"instant query with expected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
`,
},
"instant query with unexpected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected {{schema:-1 count:1 sum:6 offset:1 buckets:[1]}} for {__name__="testmetric"} but got {{schema:-1 count:1 sum:4 offset:1 buckets:[1]}}`,
},
"instant query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2
eval instant at 0 testmetric
testmetric {{}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram {{}} for {__name__="testmetric"} but got float value 2`,
},
"instant query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}}
eval instant at 0 testmetric
testmetric 2
`,
expectedError: `error in eval testmetric (line 5): expected float value 2.000000 for {__name__="testmetric"} but got histogram {{}}`,
},
"instant query, but result has an unexpected series": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result`,
},
"instant query, but result is missing a series": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
{group="test"} 100
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} with 3: [100.000000] not found`,
},
"instant query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"instant query expected to fail, but query succeeds": {
input: `eval_fail instant at 0s vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"instant query with results expected to match provided order, and result is in expected order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
`,
},
"instant query with results expected to match provided order, but result is out of order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`,
},
"instant query with results expected to match provided order, but result has an unexpected series": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result`,
},
"instant query with invalid timestamp": {
input: `eval instant at abc123 vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`,
},
"range query with expected result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
`,
},
"range query with unexpected float value": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 80 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`,
},
"range query with expected histogram values": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
},
"range query with unexpected histogram value": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`,
},
"range query with too many points for query time range": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60 90
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 4 points for {group="production"}, but query time range cannot return this many points`,
},
"range query with missing point in result": {
input: `
load 5m
testmetric 5
eval range from 0 to 6m step 6m testmetric
testmetric 5 10
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 1 float point [5 @[0]] and 0 histogram points []`,
},
"range query with extra point in result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`,
},
"range query, but result has an unexpected series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result`,
},
"range query, but result is missing a series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
{group="test"} 0 100 200
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} not found`,
},
"range query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"range query expected to fail, but query succeeds": {
input: `eval_fail range from 0 to 10m step 5m vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"range query with from and to timestamps in wrong order": {
input: `eval range from 10m to 9m step 5m vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid test definition, end timestamp (9m) is before start timestamp (10m)`,
},
"range query with sparse output": {
input: `
load 6m
testmetric 1 _ 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
},
"range query with float value returned when no value expected": {
input: `
load 6m
testmetric 1 2 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 3 float points [1 @[0] 2 @[360000] 3 @[720000]] and 0 histogram points []`,
},
"range query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2 3
eval range from 0 to 5m step 5m testmetric
testmetric {{}} {{}}
`,
expectedError: `error in eval testmetric (line 5): expected 0 float points and 2 histogram points for {__name__="testmetric"}, but got 2 float points [2 @[0] 3 @[300000]] and 0 histogram points []`,
},
"range query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}} {{}}
eval range from 0 to 5m step 5m testmetric
testmetric 2 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`,
},
"range query with expected mixed results": {
input: `
load 6m
testmetric{group="a"} {{}} _ _
testmetric{group="b"} _ _ 3
eval range from 0 to 12m step 6m sum(testmetric)
{} {{}} _ 3
`,
},
"range query with mixed results and incorrect values": {
input: `
load 5m
testmetric 3 {{}}
eval range from 0 to 5m step 5m testmetric
testmetric {{}} 3
`,
expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, newTestEngine())
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}
func TestAssertMatrixSorted(t *testing.T) {
testCases := map[string]struct {
matrix Matrix
expectedError string
}{
"empty matrix": {
matrix: Matrix{},
},
"matrix with one series": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
},
"matrix with two series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
},
"matrix with two series, series in reverse order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 1 with labels {the_label="value_1"} sorts before series at index 0 with labels {the_label="value_2"}`,
},
"matrix with three series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
},
},
"matrix with three series, series not in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 2 with labels {the_label="value_2"} sorts before series at index 1 with labels {the_label="value_3"}`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := assertMatrixSorted(testCase.matrix)
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}

View file

@@ -427,6 +427,7 @@ func (sp *scrapePool) sync(targets []*Target) {
bodySizeLimit = int64(sp.config.BodySizeLimit)
sampleLimit = int(sp.config.SampleLimit)
bucketLimit = int(sp.config.NativeHistogramBucketLimit)
maxSchema = pickSchema(sp.config.NativeHistogramMinBucketFactor)
labelLimits = &labelLimits{
labelLimit: int(sp.config.LabelLimit),
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
@@ -464,6 +465,7 @@ func (sp *scrapePool) sync(targets []*Target) {
scraper: s,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
maxSchema: maxSchema,
labelLimits: labelLimits,
honorLabels: honorLabels,
honorTimestamps: honorTimestamps,

View file

@@ -41,6 +41,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
@@ -3631,3 +3632,137 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
value := metric.GetCounter().GetValue()
require.Equal(t, 4.0, value)
}
// This test runs a full scrape loop and checks that the scrape option
// `native_histogram_min_bucket_factor` is used correctly.
func TestNativeHistogramMaxSchemaSet(t *testing.T) {
testcases := map[string]struct {
minBucketFactor string
expectedSchema int32
}{
"min factor not specified": {
minBucketFactor: "",
expectedSchema: 3, // Factor 1.09.
},
"min factor 1": {
minBucketFactor: "native_histogram_min_bucket_factor: 1",
expectedSchema: 3, // Factor 1.09.
},
"min factor 2": {
minBucketFactor: "native_histogram_min_bucket_factor: 2",
expectedSchema: 0, // Factor 2.00.
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)
})
}
}
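
For context on the expected schemas: a native histogram schema `s` corresponds to a bucket growth factor of `2^(2^-s)`, so schema 0 means buckets grow by 2x per boundary and schema 3 by roughly 1.09x. A small sketch of that arithmetic (the helper is ours, not code from this change):

```golang
package main

import (
	"fmt"
	"math"
)

// bucketGrowthFactor returns the relative growth from one native histogram
// bucket boundary to the next for a given schema: 2^(2^-schema).
func bucketGrowthFactor(schema int32) float64 {
	return math.Pow(2, math.Pow(2, -float64(schema)))
}

func main() {
	fmt.Println(bucketGrowthFactor(0)) // 2: a min bucket factor of 2 maps to schema 0.
	fmt.Println(bucketGrowthFactor(3)) // ~1.0905: the "factor 1.09" in the cases above.
}
```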
func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expectedSchema int32) {
// Create a ProtoBuf message to serve as a Prometheus metric.
nativeHistogram := prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "testing",
Name: "example_native_histogram",
Help: "This is used for testing",
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
},
)
registry := prometheus.NewRegistry()
require.NoError(t, registry.Register(nativeHistogram))
nativeHistogram.Observe(1.0)
nativeHistogram.Observe(1.0)
nativeHistogram.Observe(1.0)
nativeHistogram.Observe(10.0) // In a different bucket since 10 > 1*1.1.
nativeHistogram.Observe(10.0)
gathered, err := registry.Gather()
require.NoError(t, err)
require.NotEmpty(t, gathered)
histogramMetricFamily := gathered[0]
buffer := protoMarshalDelimited(t, histogramMetricFamily)
// Create an HTTP server to serve /metrics via ProtoBuf.
metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
w.Write(buffer)
}))
defer metricsServer.Close()
// Create a scrape loop with the HTTP server as the target.
configStr := fmt.Sprintf(`
global:
scrape_interval: 1s
scrape_timeout: 1s
scrape_configs:
- job_name: test
%s
static_configs:
- targets: [%s]
`, minBucketFactor, strings.ReplaceAll(metricsServer.URL, "http://", ""))
s := teststorage.New(t)
defer s.Close()
s.DB.EnableNativeHistograms()
reg := prometheus.NewRegistry()
mng, err := NewManager(nil, nil, s, reg)
require.NoError(t, err)
cfg, err := config.Load(configStr, false, log.NewNopLogger())
require.NoError(t, err)
require.NoError(t, mng.ApplyConfig(cfg))
tsets := make(chan map[string][]*targetgroup.Group)
go func() {
err = mng.Run(tsets)
require.NoError(t, err)
}()
defer mng.Stop()
// Get the static targets and apply them to the scrape manager.
require.Len(t, cfg.ScrapeConfigs, 1)
scrapeCfg := cfg.ScrapeConfigs[0]
require.Len(t, scrapeCfg.ServiceDiscoveryConfigs, 1)
staticDiscovery, ok := scrapeCfg.ServiceDiscoveryConfigs[0].(discovery.StaticConfig)
require.True(t, ok)
require.Len(t, staticDiscovery, 1)
tsets <- map[string][]*targetgroup.Group{"test": staticDiscovery}
// Wait for the scrape loop to scrape the target.
require.Eventually(t, func() bool {
q, err := s.Querier(0, math.MaxInt64)
require.NoError(t, err)
seriesS := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing_example_native_histogram"))
countSeries := 0
for seriesS.Next() {
countSeries++
}
return countSeries > 0
}, 15*time.Second, 100*time.Millisecond)
// Check that native histogram schema is as expected.
q, err := s.Querier(0, math.MaxInt64)
require.NoError(t, err)
seriesS := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing_example_native_histogram"))
histogramSamples := []*histogram.Histogram{}
for seriesS.Next() {
series := seriesS.At()
it := series.Iterator(nil)
for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
if vt != chunkenc.ValHistogram {
// Don't care about other samples.
continue
}
_, h := it.AtHistogram(nil)
histogramSamples = append(histogramSamples, h)
}
}
require.NoError(t, seriesS.Err())
require.NotEmpty(t, histogramSamples)
for _, h := range histogramSamples {
require.Equal(t, expectedSchema, h.Schema)
}
}

View file

@@ -529,9 +529,10 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
if err := head.Init(maxBlockTime); err != nil {
return nil, fmt.Errorf("read WAL: %w", err)
}
// Set the wal to nil to disable all wal operations.
// Set the wal and the wbl to nil to disable related operations.
// This is mainly to avoid blocking when closing the head.
head.wal = nil
head.wbl = nil
}
db.closers = append(db.closers, head)
@@ -1608,7 +1609,7 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
deletable = make(map[ulid.ULID]struct{})
for i, block := range blocks {
// The difference between the first block and this block is larger than
// The difference between the first block and this block is greater than or equal to
// the retention period so any blocks after that are added as deletable.
if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= db.opts.RetentionDuration {
for _, b := range blocks[i:] {

View file

@@ -681,34 +681,6 @@ func TestDB_Snapshot(t *testing.T) {
require.Equal(t, 1000.0, sum)
}
func TestDB_BeyondTimeRetention(t *testing.T) {
opts := DefaultOptions()
opts.RetentionDuration = 100
db := openTestDB(t, opts, nil)
defer func() {
require.NoError(t, db.Close())
}()
// We have 4 blocks, 3 of which are beyond the retention duration.
metas := []BlockMeta{
{MinTime: 300, MaxTime: 500},
{MinTime: 200, MaxTime: 300},
{MinTime: 100, MaxTime: 200},
{MinTime: 0, MaxTime: 100},
}
for _, m := range metas {
createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
}
// Reloading should truncate the 3 blocks which are >= the retention period.
require.NoError(t, db.reloadBlocks())
blocks := db.Blocks()
require.Len(t, blocks, 1)
require.Equal(t, metas[0].MinTime, blocks[0].Meta().MinTime)
require.Equal(t, metas[0].MaxTime, blocks[0].Meta().MaxTime)
}
// TestDB_Snapshot_ChunksOutsideOfCompactedRange ensures that a snapshot removes chunks samples
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
@@ -1491,34 +1463,66 @@ func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ul
}
func TestTimeRetention(t *testing.T) {
testCases := []struct {
name string
blocks []*BlockMeta
expBlocks []*BlockMeta
retentionDuration int64
}{
{
name: "Block max time delta greater than retention duration",
blocks: []*BlockMeta{
{MinTime: 500, MaxTime: 900}, // Oldest block, beyond retention
{MinTime: 1000, MaxTime: 1500},
{MinTime: 1500, MaxTime: 2000}, // Newest block
},
expBlocks: []*BlockMeta{
{MinTime: 1000, MaxTime: 1500},
{MinTime: 1500, MaxTime: 2000},
},
retentionDuration: 1000,
},
{
name: "Block max time delta equal to retention duration",
blocks: []*BlockMeta{
{MinTime: 500, MaxTime: 900}, // Oldest block
{MinTime: 1000, MaxTime: 1500}, // Coinciding exactly with the retention duration.
{MinTime: 1500, MaxTime: 2000}, // Newest block
},
expBlocks: []*BlockMeta{
{MinTime: 1500, MaxTime: 2000},
},
retentionDuration: 500,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
db := openTestDB(t, nil, []int64{1000})
defer func() {
require.NoError(t, db.Close())
}()
blocks := []*BlockMeta{
{MinTime: 500, MaxTime: 900}, // Oldest block
{MinTime: 1000, MaxTime: 1500},
{MinTime: 1500, MaxTime: 2000}, // Newest Block
}
for _, m := range blocks {
for _, m := range tc.blocks {
createBlock(t, db.Dir(), genSeries(10, 10, m.MinTime, m.MaxTime))
}
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
require.Len(t, db.Blocks(), len(tc.blocks)) // Ensure all blocks are registered.
db.opts.RetentionDuration = blocks[2].MaxTime - blocks[1].MinTime
db.opts.RetentionDuration = tc.retentionDuration
// Reloading should truncate the blocks whose max time delta from the first block is >= the retention duration.
require.NoError(t, db.reloadBlocks())
expBlocks := blocks[1:]
actBlocks := db.Blocks()
require.Equal(t, 1, int(prom_testutil.ToFloat64(db.metrics.timeRetentionCount)), "metric retention count mismatch")
require.Equal(t, len(expBlocks), len(actBlocks))
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime)
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime)
require.Len(t, actBlocks, len(tc.expBlocks))
for i, eb := range tc.expBlocks {
require.Equal(t, eb.MinTime, actBlocks[i].meta.MinTime)
require.Equal(t, eb.MaxTime, actBlocks[i].meta.MaxTime)
}
})
}
}
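
Both cases follow from the now-inclusive comparison in `BeyondTimeRetention`: a block becomes deletable once the newest block's max time minus its own max time reaches the retention duration. A standalone sketch with the values used above (the helper name is ours):

```golang
package main

import "fmt"

// keepsBlock mirrors the >= comparison in BeyondTimeRetention: a block whose
// max time lags the newest block's max time by at least the retention
// duration is deletable.
func keepsBlock(newestMaxT, blockMaxT, retention int64) bool {
	return newestMaxT-blockMaxT < retention
}

func main() {
	fmt.Println(keepsBlock(2000, 1500, 1000)) // true: delta 500 < 1000.
	fmt.Println(keepsBlock(2000, 900, 1000))  // false: delta 1100 >= 1000.
	fmt.Println(keepsBlock(2000, 1500, 500))  // false: delta 500 equals retention, now dropped.
}
```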
func TestRetentionDurationMetric(t *testing.T) {
@@ -1994,6 +1998,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
// Should be set to init values if no WAL or blocks exist so far.
require.Equal(t, int64(math.MaxInt64), db.head.MinTime())
require.Equal(t, int64(math.MinInt64), db.head.MaxTime())
require.False(t, db.head.initialized())
// First added sample initializes the writable range.
ctx := context.Background()
@@ -2003,6 +2008,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(1000), db.head.MinTime())
require.Equal(t, int64(1000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("wal-only", func(t *testing.T) {
dir := t.TempDir()
@@ -2031,6 +2037,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("existing-block", func(t *testing.T) {
dir := t.TempDir()
@@ -2043,6 +2050,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(2000), db.head.MinTime())
require.Equal(t, int64(2000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("existing-block-and-wal", func(t *testing.T) {
dir := t.TempDir()
@@ -2075,6 +2083,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
})

View file

@@ -620,6 +620,7 @@ func (h *Head) Init(minValidTime int64) error {
refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
snapshotLoaded := false
var chunkSnapshotLoadDuration time.Duration
if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@@ -650,7 +651,8 @@ func (h *Head) Init(minValidTime int64) error {
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil {
snapshotLoaded = true
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
chunkSnapshotLoadDuration = time.Since(start)
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String())
}
if err != nil {
snapIdx, snapOffset = -1, 0
@@ -672,6 +674,8 @@ func (h *Head) Init(minValidTime int64) error {
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
lastMmapRef chunks.ChunkDiskMapperRef
err error
mmapChunkReplayDuration time.Duration
)
if snapshotLoaded || h.wal != nil {
// If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded
@@ -695,7 +699,8 @@ func (h *Head) Init(minValidTime int64) error {
return err
}
}
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
mmapChunkReplayDuration = time.Since(mmapChunkReplayStart)
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String())
}
if h.wal == nil {
@@ -817,6 +822,8 @@ func (h *Head) Init(minValidTime int64) error {
"checkpoint_replay_duration", checkpointReplayDuration.String(),
"wal_replay_duration", walReplayDuration.String(),
"wbl_replay_duration", wblReplayDuration.String(),
"chunk_snapshot_load_duration", chunkSnapshotLoadDuration.String(),
"mmap_chunk_replay_duration", mmapChunkReplayDuration.String(),
"total_replay_duration", totalReplayDuration.String(),
)
@@ -1074,11 +1081,11 @@ func (h *Head) SetMinValidTime(minValidTime int64) {
// Truncate removes old data before mint from the head and WAL.
func (h *Head) Truncate(mint int64) (err error) {
initialize := h.MinTime() == math.MaxInt64
initialized := h.initialized()
if err := h.truncateMemory(mint); err != nil {
return err
}
if initialize {
if !initialized {
return nil
}
return h.truncateWAL(mint)
@@ -1100,9 +1107,9 @@ func (h *Head) truncateMemory(mint int64) (err error) {
}
}()
initialize := h.MinTime() == math.MaxInt64
initialized := h.initialized()
if h.MinTime() >= mint && !initialize {
if h.MinTime() >= mint && initialized {
return nil
}
@@ -1113,7 +1120,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
defer h.memTruncationInProcess.Store(false)
// We wait for pending queries to end that overlap with this truncation.
if !initialize {
if initialized {
h.WaitForPendingReadersInTimeRange(h.MinTime(), mint)
}
@@ -1127,7 +1134,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
// This was an initial call to Truncate after loading blocks on startup.
// We haven't read back the WAL yet, so do not attempt to truncate it.
if initialize {
if !initialized {
return nil
}
@@ -1615,10 +1622,19 @@ func (h *Head) MaxOOOTime() int64 {
return h.maxOOOTime.Load()
}
// initialized returns true if the head has a MinTime set, false otherwise.
func (h *Head) initialized() bool {
return h.MinTime() != math.MaxInt64
}
// compactable returns whether the head has a compactable range.
// The head has a compactable range when the head time range exceeds 1.5 times the chunk range.
// The 0.5 acts as a buffer of the appendable window.
func (h *Head) compactable() bool {
if !h.initialized() {
return false
}
return h.MaxTime()-h.MinTime() > h.chunkRange.Load()/2*3
}
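
Concretely, with the default 2h chunk range the head only becomes compactable once it spans more than 3h, and never while uninitialized. A standalone restatement of the check (the function name is ours):

```golang
package main

import (
	"fmt"
	"math"
)

// headCompactable restates Head.compactable: an uninitialized head (MinTime
// still math.MaxInt64) is never compactable; otherwise the head time range
// must exceed 1.5x the chunk range.
func headCompactable(minT, maxT, chunkRange int64) bool {
	if minT == math.MaxInt64 {
		return false
	}
	return maxT-minT > chunkRange/2*3
}

func main() {
	const chunkRange = 2 * 60 * 60 * 1000 // Default 2h chunk range, in milliseconds.
	fmt.Println(headCompactable(0, 3*60*60*1000, chunkRange))              // false: exactly 3h.
	fmt.Println(headCompactable(0, 3*60*60*1000+1, chunkRange))            // true: just over 3h.
	fmt.Println(headCompactable(math.MaxInt64, math.MinInt64, chunkRange)) // false: uninitialized head.
}
```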

View file

@@ -138,7 +138,7 @@ func (h *Head) Appender(_ context.Context) storage.Appender {
// The head cache might not have a starting point yet. The init appender
// picks up the first appended timestamp as the base.
if h.MinTime() == math.MaxInt64 {
if !h.initialized() {
return &initAppender{
head: h,
}
@@ -191,7 +191,7 @@ func (h *Head) appendableMinValidTime() int64 {
// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
// Returns false if the Head hasn't been initialized yet and the minimum time isn't known.
func (h *Head) AppendableMinValidTime() (int64, bool) {
if h.MinTime() == math.MaxInt64 {
if !h.initialized() {
return 0, false
}

View file

@@ -5819,3 +5819,16 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
require.Equal(t, chunkenc.ValNone, it.Next())
}
}
func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
// Use a chunk range of 1 here so that if we attempted to determine if the head
// was compactable using default values for min and max times, `Head.compactable()`
// would return true, which is incorrect. This test verifies that we short-circuit
// the check when the head has not yet had any samples added.
head, _ := newTestHead(t, 1, wlog.CompressionNone, false)
defer func() {
require.NoError(t, head.Close())
}()
require.False(t, head.compactable())
}

View file

@@ -19,8 +19,6 @@ import (
"fmt"
"math"
"slices"
"strings"
"unicode/utf8"
"github.com/oklog/ulid"
@@ -35,20 +33,6 @@ import (
"github.com/prometheus/prometheus/util/annotations"
)
// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped.
var regexMetaCharacterBytes [16]byte
// isRegexMetaCharacter reports whether byte b needs to be escaped.
func isRegexMetaCharacter(b byte) bool {
return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}
func init() {
for _, b := range []byte(`.+*?()|[]{}^$`) {
regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
}
}
type blockBaseQuerier struct {
blockID ulid.ULID
index IndexReader
@@ -195,55 +179,6 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *
return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
}
func findSetMatches(pattern string) []string {
// Return empty matches if the wrapper from Prometheus is missing.
if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" {
return nil
}
escaped := false
sets := []*strings.Builder{{}}
init := 4
end := len(pattern) - 2
// If the regex is wrapped in a group we can remove the first and last parentheses
if pattern[init] == '(' && pattern[end-1] == ')' {
init++
end--
}
for i := init; i < end; i++ {
if escaped {
switch {
case isRegexMetaCharacter(pattern[i]):
sets[len(sets)-1].WriteByte(pattern[i])
case pattern[i] == '\\':
sets[len(sets)-1].WriteByte('\\')
default:
return nil
}
escaped = false
} else {
switch {
case isRegexMetaCharacter(pattern[i]):
if pattern[i] == '|' {
sets = append(sets, &strings.Builder{})
} else {
return nil
}
case pattern[i] == '\\':
escaped = true
default:
sets[len(sets)-1].WriteByte(pattern[i])
}
}
}
matches := make([]string, 0, len(sets))
for _, s := range sets {
if s.Len() > 0 {
matches = append(matches, s.String())
}
}
return matches
}
// PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers. The resulting postings are not ordered by series.
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
@@ -385,7 +320,7 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher)
// Fast-path for set matching.
if m.Type == labels.MatchRegexp {
setMatches := findSetMatches(m.GetRegexString())
setMatches := m.SetMatches()
if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...)
}
@@ -416,7 +351,7 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
// Inverse of a MatchNotRegexp is MatchRegexp (double negation).
// Fast-path for set matching.
if m.Type == labels.MatchNotRegexp {
setMatches := findSetMatches(m.GetRegexString())
setMatches := m.SetMatches()
if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...)
}

View file

@@ -2658,54 +2658,6 @@ func BenchmarkSetMatcher(b *testing.B) {
}
}
// Refer to https://github.com/prometheus/prometheus/issues/2651.
func TestFindSetMatches(t *testing.T) {
cases := []struct {
pattern string
exp []string
}{
// Single value, coming from a `bar=~"foo"` selector.
{
pattern: "^(?:foo)$",
exp: []string{
"foo",
},
},
// Simple sets.
{
pattern: "^(?:foo|bar|baz)$",
exp: []string{
"foo",
"bar",
"baz",
},
},
// Simple sets containing escaped characters.
{
pattern: "^(?:fo\\.o|bar\\?|\\^baz)$",
exp: []string{
"fo.o",
"bar?",
"^baz",
},
},
// Simple sets containing special characters without escaping.
{
pattern: "^(?:fo.o|bar?|^baz)$",
exp: nil,
},
// Missing wrapper.
{
pattern: "foo|bar|baz",
exp: nil,
},
}
for _, c := range cases {
require.Equal(t, c.exp, findSetMatches(c.pattern), "Evaluating %s, unexpected result.", c.pattern)
}
}
func TestPostingsForMatchers(t *testing.T) {
ctx := context.Background()
@@ -3310,7 +3262,7 @@ func TestPostingsForMatcher(t *testing.T) {
{
// Test case for double quoted regex matcher
matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", "^(?:a|b)$"),
hasError: true,
hasError: false,
},
}

View file

@@ -152,6 +152,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
series []record.RefSeries
samples []record.RefSample
histogramSamples []record.RefHistogramSample
floatHistogramSamples []record.RefFloatHistogramSample
tstones []tombstones.Stone
exemplars []record.RefExemplar
metadata []record.RefMetadata
@@ -164,7 +165,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
)
for r.Next() {
series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
// We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint.
@@ -224,8 +225,26 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
if len(repl) > 0 {
buf = enc.HistogramSamples(repl, buf)
}
stats.TotalSamples += len(samples)
stats.DroppedSamples += len(samples) - len(repl)
stats.TotalSamples += len(histogramSamples)
stats.DroppedSamples += len(histogramSamples) - len(repl)
case record.FloatHistogramSamples:
floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
if err != nil {
return nil, fmt.Errorf("decode float histogram samples: %w", err)
}
// Drop irrelevant floatHistogramSamples in place.
repl := floatHistogramSamples[:0]
for _, fh := range floatHistogramSamples {
if fh.T >= mint {
repl = append(repl, fh)
}
}
if len(repl) > 0 {
buf = enc.FloatHistogramSamples(repl, buf)
}
stats.TotalSamples += len(floatHistogramSamples)
stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
case record.Tombstones:
tstones, err = dec.Tombstones(rec, tstones)

View file

@@ -125,6 +125,20 @@ func TestCheckpoint(t *testing.T) {
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
}
}
makeFloatHistogram := func(i int) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 5 + float64(i*4),
ZeroCount: 2 + float64(i),
ZeroThreshold: 0.001,
Sum: 18.4 * float64(i+1),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{float64(i + 1), 1, -1, 0},
}
}
for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
@@ -154,7 +168,7 @@ func TestCheckpoint(t *testing.T) {
w, err = NewSize(nil, nil, dir, 64*1024, compress)
require.NoError(t, err)
samplesInWAL, histogramsInWAL := 0, 0
samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
var last int64
for i := 0; ; i++ {
_, n, err := Segments(w.Dir())
@@ -200,6 +214,15 @@ func TestCheckpoint(t *testing.T) {
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
fh := makeFloatHistogram(i)
b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: fh},
{Ref: 1, T: last + 10000, FH: fh},
{Ref: 2, T: last + 20000, FH: fh},
{Ref: 3, T: last + 30000, FH: fh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
b = enc.Exemplars([]record.RefExemplar{
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
@@ -220,12 +243,14 @@ func TestCheckpoint(t *testing.T) {
}
require.NoError(t, w.Close())
_, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
stats, err := Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
return x%2 == 0
}, last/2)
require.NoError(t, err)
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
require.Greater(t, stats.DroppedSamples, 0)
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)
@@ -242,7 +267,7 @@ func TestCheckpoint(t *testing.T) {
var metadata []record.RefMetadata
r := NewReader(sr)
samplesInCheckpoint, histogramsInCheckpoint := 0, 0
samplesInCheckpoint, histogramsInCheckpoint, floatHistogramsInCheckpoint := 0, 0, 0
for r.Next() {
rec := r.Record()
@@ -264,6 +289,13 @@ func TestCheckpoint(t *testing.T) {
require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
}
histogramsInCheckpoint += len(histograms)
case record.FloatHistogramSamples:
floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range floatHistograms {
require.GreaterOrEqual(t, h.T, last/2, "float histogram with wrong timestamp")
}
floatHistogramsInCheckpoint += len(floatHistograms)
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
@@ -281,6 +313,8 @@ func TestCheckpoint(t *testing.T) {
require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)
require.Greater(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.5)
require.Less(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.8)
expectedRefSeries := []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},

View file

@@ -14,6 +14,7 @@
package teststorage
import (
"fmt"
"os"
"time"
@@ -30,8 +31,18 @@ import (
// New returns a new TestStorage for testing purposes
// that removes all associated files on closing.
func New(t testutil.T) *TestStorage {
stor, err := NewWithError()
require.NoError(t, err)
return stor
}
// NewWithError returns a new TestStorage for user-facing tests, which reports
// errors directly.
func NewWithError() (*TestStorage, error) {
dir, err := os.MkdirTemp("", "test_storage")
require.NoError(t, err, "unexpected error while opening test directory")
if err != nil {
return nil, fmt.Errorf("opening test directory: %w", err)
}
// Tests just load data for a series sequentially. Thus we
// need a long appendable window.
@@ -41,13 +52,17 @@ func New(t testutil.T) *TestStorage {
opts.RetentionDuration = 0
opts.EnableNativeHistograms = true
db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
require.NoError(t, err, "unexpected error while opening test storage")
if err != nil {
return nil, fmt.Errorf("opening test storage: %w", err)
}
reg := prometheus.NewRegistry()
eMetrics := tsdb.NewExemplarMetrics(reg)
es, err := tsdb.NewCircularExemplarStorage(10, eMetrics)
require.NoError(t, err, "unexpected error while opening test exemplar storage")
return &TestStorage{DB: db, exemplarStorage: es, dir: dir}
if err != nil {
return nil, fmt.Errorf("opening test exemplar storage: %w", err)
}
return &TestStorage{DB: db, exemplarStorage: es, dir: dir}, nil
}
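
This mirrors the `LazyLoader` change earlier in the commit: callers without a `testutil.T` can now set up and tear down the storage with plain error handling. A minimal usage sketch:

```golang
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/util/teststorage"
)

func run() error {
	stor, err := teststorage.NewWithError()
	if err != nil {
		return fmt.Errorf("setting up test storage: %w", err)
	}
	// Closing the storage also removes its temporary directory.
	defer func() { _ = stor.Close() }()
	// Append and query samples via stor.DB here.
	return nil
}

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}
```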
type TestStorage struct {