Merge pull request #561 from grafana/krajo/merge-upstream

Sync with upstream main at 79f4e45d64
George Krajcsovits 2023-11-03 07:18:20 +01:00 committed by GitHub
commit 790cede0a1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
74 changed files with 953 additions and 312 deletions

View file

@@ -15,6 +15,7 @@ linters:
    - depguard
    - errorlint
    - gocritic
+   - godot
    - gofumpt
    - goimports
    - misspell
@@ -33,9 +34,6 @@ issues:
    - path: _test.go
      linters:
        - errcheck
-   - path: scrape/
-     linters:
-       - errorlint
    - path: tsdb/
      linters:
        - errorlint
@@ -45,6 +43,9 @@ issues:
    - path: web/
      linters:
        - errorlint
+   - linters:
+       - godot
+     source: "^// ==="
linters-settings:
  depguard:

View file

@@ -1,5 +1,11 @@
# Changelog

+## 2.48.0-rc.2 / 2023-11-02
+
+* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060
+* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055
+* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062
+
## 2.48.0-rc.1 / 2023-10-24

* [BUGFIX] PromQL: Reduce inefficiency introduced by warnings/annotations and temporarily remove possible non-counter warnings. #13012

View file

@@ -1 +1 @@
-2.48.0-rc.1
+2.48.0-rc.2

View file

@@ -1283,7 +1283,7 @@ func startsOrEndsWithQuote(s string) bool {
        strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'")
}

-// compileCORSRegexString compiles given string and adds anchors
+// compileCORSRegexString compiles given string and adds anchors.
func compileCORSRegexString(s string) (*regexp.Regexp, error) {
    r, err := relabel.NewRegexp(s)
    if err != nil {

View file

@@ -281,7 +281,7 @@ func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azco
    return credential, nil
}

-// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS)
+// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS).
type virtualMachine struct {
    ID   string
    Name string

View file

@@ -50,7 +50,7 @@ const (
    tagsLabel = model.MetaLabelPrefix + "consul_tags"
    // serviceLabel is the name of the label containing the service name.
    serviceLabel = model.MetaLabelPrefix + "consul_service"
-   // healthLabel is the name of the label containing the health of the service instance
+   // healthLabel is the name of the label containing the health of the service instance.
    healthLabel = model.MetaLabelPrefix + "consul_health"
    // serviceAddressLabel is the name of the label containing the (optional) service address.
    serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"

View file

@@ -21,7 +21,7 @@ import (
    "testing"
)

-// SDMock is the interface for the DigitalOcean mock
+// SDMock is the interface for the DigitalOcean mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server
@@ -35,18 +35,18 @@ func NewSDMock(t *testing.T) *SDMock {
    }
}

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string {
    return m.Server.URL + "/"
}

-// Setup creates the mock server
+// Setup creates the mock server.
func (m *SDMock) Setup() {
    m.Mux = http.NewServeMux()
    m.Server = httptest.NewServer(m.Mux)
}

-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() {
    m.Server.Close()
}
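These mock helpers follow the same lifecycle in every SD test package. A minimal sketch of how such a mock is typically wired into a test; the handler path and JSON payload here are illustrative, not part of this commit:

```go
func TestDropletDiscoveryExample(t *testing.T) {
	mock := NewSDMock(t)
	mock.Setup()
	defer mock.ShutdownServer()

	// Register an illustrative handler; the real tests serve recorded API fixtures.
	mock.Mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"droplets": [], "links": {}, "meta": {"total": 0}}`)
	})

	// The SD config under test is then pointed at mock.Endpoint().
	_ = mock.Endpoint()
}
```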

View file

@@ -20,7 +20,7 @@ import (
    "testing"
)

-// SDMock is the interface for the Hetzner Cloud mock
+// SDMock is the interface for the Hetzner Cloud mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server
@@ -34,19 +34,19 @@ func NewSDMock(t *testing.T) *SDMock {
    }
}

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string {
    return m.Server.URL + "/"
}

-// Setup creates the mock server
+// Setup creates the mock server.
func (m *SDMock) Setup() {
    m.Mux = http.NewServeMux()
    m.Server = httptest.NewServer(m.Mux)
    m.t.Cleanup(m.Server.Close)
}

-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() {
    m.Server.Close()
}

View file

@@ -45,7 +45,7 @@ var (
        []string{"endpoint"},
    )

-   // Definition of metrics for client-go workflow metrics provider
+   // Definition of metrics for client-go workflow metrics provider.
    clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Namespace: workqueueMetricsNamespace,
@@ -106,7 +106,7 @@ func (noopMetric) Dec() {}
func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64)     {}

-// Definition of client-go metrics adapters for HTTP requests observation
+// Definition of client-go metrics adapters for HTTP requests observation.
type clientGoRequestMetricAdapter struct{}

func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
@@ -130,7 +130,7 @@ func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.U
    clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
}

-// Definition of client-go workqueue metrics provider definition
+// Definition of client-go workqueue metrics provider definition.
type clientGoWorkqueueMetricsProvider struct{}

func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) {

View file

@@ -20,7 +20,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

-// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions
+// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions.
type endpointSliceAdaptor interface {
    get() interface{}
    getObjectMeta() metav1.ObjectMeta
@@ -55,7 +55,7 @@ type endpointSliceEndpointConditionsAdaptor interface {
    terminating() *bool
}

-// Adaptor for k8s.io/api/discovery/v1
+// Adaptor for k8s.io/api/discovery/v1.
type endpointSliceAdaptorV1 struct {
    endpointSlice *v1.EndpointSlice
}
@@ -108,7 +108,7 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
    return v1.LabelServiceName
}

-// Adaptor for k8s.io/api/discovery/v1beta1
+// Adaptor for k8s.io/api/discovery/v1beta1.
type endpointSliceAdaptorV1Beta1 struct {
    endpointSlice *v1beta1.EndpointSlice
}

View file

@@ -19,7 +19,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

-// ingressAdaptor is an adaptor for the different Ingress versions
+// ingressAdaptor is an adaptor for the different Ingress versions.
type ingressAdaptor interface {
    getObjectMeta() metav1.ObjectMeta
    name() string
@@ -36,7 +36,7 @@ type ingressRuleAdaptor interface {
    host() string
}

-// Adaptor for networking.k8s.io/v1
+// Adaptor for networking.k8s.io/v1.
type ingressAdaptorV1 struct {
    ingress *v1.Ingress
}
@@ -90,7 +90,7 @@ func (i *ingressRuleAdaptorV1) paths() []string {
func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }

-// Adaptor for networking.k8s.io/v1beta1
+// Adaptor for networking.k8s.io/v1beta1.
type ingressAdaptorV1Beta1 struct {
    ingress *v1beta1.Ingress
}

View file

@@ -65,9 +65,9 @@ const (
)

var (
-   // Http header
+   // Http header.
    userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
-   // Custom events metric
+   // Custom events metric.
    eventCount = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: metricsNamespace,
@@ -76,7 +76,7 @@ var (
        },
        []string{"role", "event"},
    )
-   // DefaultSDConfig is the default Kubernetes SD configuration
+   // DefaultSDConfig is the default Kubernetes SD configuration.
    DefaultSDConfig = SDConfig{
        HTTPClientConfig: config.DefaultHTTPClientConfig,
    }

View file

@@ -202,7 +202,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
// 5. NodeLegacyHostIP
// 6. NodeHostName
//
-// Derived from k8s.io/kubernetes/pkg/util/node/node.go
+// Derived from k8s.io/kubernetes/pkg/util/node/node.go.
func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) {
    m := map[apiv1.NodeAddressType][]string{}
    for _, a := range node.Status.Addresses {

View file

@@ -137,7 +137,7 @@ type Manager struct {
    triggerSend chan struct{}
}

-// Run starts the background processing
+// Run starts the background processing.
func (m *Manager) Run() error {
    go m.sender()
    <-m.ctx.Done()

View file

@@ -20,7 +20,7 @@ import (
    "testing"
)

-// SDMock is the interface for the Linode mock
+// SDMock is the interface for the Linode mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server
@@ -34,18 +34,18 @@ func NewSDMock(t *testing.T) *SDMock {
    }
}

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string {
    return m.Server.URL + "/"
}

-// Setup creates the mock server
+// Setup creates the mock server.
func (m *SDMock) Setup() {
    m.Mux = http.NewServeMux()
    m.Server = httptest.NewServer(m.Mux)
}

-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() {
    m.Server.Close()
}

View file

@@ -92,7 +92,7 @@ type Provider struct {
    newSubs map[string]struct{}
}

-// Discoverer return the Discoverer of the provider
+// Discoverer return the Discoverer of the provider.
func (p *Provider) Discoverer() Discoverer {
    return p.d
}

View file

@@ -48,7 +48,7 @@ const (
    // imageLabel is the label that is used for the docker image running the service.
    imageLabel model.LabelName = metaLabelPrefix + "image"
    // portIndexLabel is the integer port index when multiple ports are defined;
-   // e.g. PORT1 would have a value of '1'
+   // e.g. PORT1 would have a value of '1'.
    portIndexLabel model.LabelName = metaLabelPrefix + "port_index"
    // taskLabel contains the mesos task name of the app instance.
    taskLabel model.LabelName = metaLabelPrefix + "task"

View file

@@ -29,7 +29,7 @@ import (
    "github.com/prometheus/prometheus/util/strutil"
)

-// SDMock is the interface for the DigitalOcean mock
+// SDMock is the interface for the DigitalOcean mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server
@@ -47,12 +47,12 @@ func NewSDMock(t *testing.T, directory string) *SDMock {
    }
}

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string {
    return m.Server.URL + "/"
}

-// Setup creates the mock server
+// Setup creates the mock server.
func (m *SDMock) Setup() {
    m.Mux = http.NewServeMux()
    m.Server = httptest.NewServer(m.Mux)

View file

@@ -30,7 +30,7 @@ type NomadSDTestSuite struct {
    Mock *SDMock
}

-// SDMock is the interface for the nomad mock
+// SDMock is the interface for the nomad mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server

View file

@@ -20,7 +20,7 @@ import (
    "testing"
)

-// SDMock is the interface for the OpenStack mock
+// SDMock is the interface for the OpenStack mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server
@@ -34,12 +34,12 @@ func NewSDMock(t *testing.T) *SDMock {
    }
}

-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string {
    return m.Server.URL + "/"
}

-// Setup creates the mock server
+// Setup creates the mock server.
func (m *SDMock) Setup() {
    m.Mux = http.NewServeMux()
    m.Server = httptest.NewServer(m.Mux)
@@ -60,7 +60,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) {
    }
}

-// HandleVersionsSuccessfully mocks version call
+// HandleVersionsSuccessfully mocks version call.
func (m *SDMock) HandleVersionsSuccessfully() {
    m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintf(w, `
@@ -88,7 +88,7 @@ func (m *SDMock) HandleVersionsSuccessfully() {
    })
}

-// HandleAuthSuccessfully mocks auth call
+// HandleAuthSuccessfully mocks auth call.
func (m *SDMock) HandleAuthSuccessfully() {
    m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
        w.Header().Add("X-Subject-Token", tokenID)
@@ -236,7 +236,7 @@ const hypervisorListBody = `
    ]
}`

-// HandleHypervisorListSuccessfully mocks os-hypervisors detail call
+// HandleHypervisorListSuccessfully mocks os-hypervisors detail call.
func (m *SDMock) HandleHypervisorListSuccessfully() {
    m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) {
        testMethod(m.t, r, "GET")
@@ -533,7 +533,7 @@ const serverListBody = `
}
`

-// HandleServerListSuccessfully mocks server detail call
+// HandleServerListSuccessfully mocks server detail call.
func (m *SDMock) HandleServerListSuccessfully() {
    m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
        testMethod(m.t, r, "GET")
@@ -572,7 +572,7 @@ const listOutput = `
}
`

-// HandleFloatingIPListSuccessfully mocks floating ips call
+// HandleFloatingIPListSuccessfully mocks floating ips call.
func (m *SDMock) HandleFloatingIPListSuccessfully() {
    m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
        testMethod(m.t, r, "GET")

View file

@@ -20,7 +20,7 @@ import (
    "testing"
)

-// SDMock is the interface for the Vultr mock
+// SDMock is the interface for the Vultr mock.
type SDMock struct {
    t      *testing.T
    Server *httptest.Server

View file

@@ -125,7 +125,61 @@ histogram (albeit via the text format). With this flag enabled, Prometheus will
still ingest those conventional histograms that do not come with a
corresponding native histogram. However, if a native histogram is present,
Prometheus will ignore the corresponding conventional histogram, with the
-notable exception of exemplars, which are always ingested.
+notable exception of exemplars, which are always ingested. To keep the
+conventional histograms as well, enable `scrape_classic_histograms` in the
+scrape job.
+
+_Note about the format of `le` and `quantile` label values:_
+
+In certain situations, the protobuf parsing changes the number formatting of
+the `le` labels of conventional histograms and the `quantile` labels of
+summaries. Typically, this happens if the scraped target is instrumented with
+[client_golang](https://github.com/prometheus/client_golang) provided that
+[promhttp.HandlerOpts.EnableOpenMetrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus/promhttp#HandlerOpts)
+is set to `false`. In such a case, integer label values are represented in the
+text format as such, e.g. `quantile="1"` or `le="2"`. However, the protobuf parsing
+changes the representation to float-like (following the OpenMetrics
+specification), so the examples above become `quantile="1.0"` and `le="2.0"` after
+ingestion into Prometheus, which changes the identity of the metric compared to
+what was ingested before via the text format.
+
+The effect of this change is that alerts, recording rules and dashboards that
+directly reference label values as whole numbers such as `le="1"` will stop
+working.
+
+Aggregation by the `le` and `quantile` labels for vectors that contain the old and
+new formatting will lead to unexpected results, and range vectors that span the
+transition between the different formatting will contain additional series.
+The most common use case for both is the quantile calculation via
+`histogram_quantile`, e.g.
+`histogram_quantile(0.95, sum by (le) (rate(histogram_bucket[10m])))`.
+The `histogram_quantile` function already tries to mitigate the effects to some
+extent, but there will be inaccuracies, in particular for shorter ranges that
+cover only a few samples.
+
+Ways to deal with this change either globally or on a per metric basis:
+
+- Fix references to integer `le`, `quantile` label values, but otherwise do
+  nothing and accept that some queries that span the transition time will produce
+  inaccurate or unexpected results.
+  _This is the recommended solution, to get consistently normalized label values._
+  Also Prometheus 3.0 is expected to enforce normalization of these label values.
+- Use `metric_relabel_config` to retain the old labels when scraping targets.
+  This should **only** be applied to metrics that currently produce such labels.
+
+  <!-- The following config snippet is unit tested in scrape/scrape_test.go. -->
+  ```yaml
+  metric_relabel_configs:
+    - source_labels:
+        - quantile
+      target_label: quantile
+      regex: (\d+)\.0+
+    - source_labels:
+        - le
+        - __name__
+      target_label: le
+      regex: (\d+)\.0+;.*_bucket
+  ```

## OTLP Receiver
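As an illustration of the relabel rules above (the series names are hypothetical, not from this commit): the first rule strips a trailing `.0` from `quantile` values, the second does the same for `le` but only on `_bucket` series.

```
# scraped (protobuf parsing, OpenMetrics-style formatting):
go_gc_duration_seconds{quantile="1.0"}
http_request_duration_seconds_bucket{le="2.0"}

# stored after applying the relabel rules (text-format-style formatting):
go_gc_duration_seconds{quantile="1"}
http_request_duration_seconds_bucket{le="2"}
```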

View file

@@ -9,7 +9,7 @@ require (
    github.com/golang/snappy v0.0.4
    github.com/influxdata/influxdb v1.11.2
    github.com/prometheus/client_golang v1.17.0
-   github.com/prometheus/common v0.44.0
+   github.com/prometheus/common v0.45.0
    github.com/prometheus/prometheus v0.47.2
    github.com/stretchr/testify v1.8.4
)
@@ -39,7 +39,7 @@ require (
    github.com/klauspost/compress v1.16.7 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
-   github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+   github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
@@ -58,12 +58,12 @@ require (
    go.opentelemetry.io/otel/trace v1.16.0 // indirect
    go.uber.org/atomic v1.11.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
-   golang.org/x/crypto v0.11.0 // indirect
+   golang.org/x/crypto v0.14.0 // indirect
    golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
-   golang.org/x/net v0.12.0 // indirect
-   golang.org/x/oauth2 v0.10.0 // indirect
-   golang.org/x/sys v0.11.0 // indirect
-   golang.org/x/text v0.11.0 // indirect
+   golang.org/x/net v0.17.0 // indirect
+   golang.org/x/oauth2 v0.12.0 // indirect
+   golang.org/x/sys v0.13.0 // indirect
+   golang.org/x/text v0.13.0 // indirect
    golang.org/x/time v0.3.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect

View file

@@ -167,8 +167,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -211,8 +211,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -266,8 +266,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -290,12 +290,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -323,20 +323,20 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View file

@@ -69,16 +69,16 @@
//
// Examples:
//
//	"foo-bar-42" -> "foo-bar-42"
//
//	"foo_bar%42" -> "foo_bar%2542"
//
//	"http://example.org:8080" -> "http:%2F%2Fexample%2Eorg:8080"
//
//	"Björn's email: bjoern@soundcloud.com" ->
//	"Bj%C3%B6rn's%20email:%20bjoern%40soundcloud.com"
//
//	"日" -> "%E6%97%A5"
func escape(tv model.LabelValue) string {
    length := len(tv)
    result := bytes.NewBuffer(make([]byte, 0, length))
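The escaping rule those examples describe can be reproduced in a standalone sketch (an illustrative reimplementation, not the function body from this diff): keep `[a-zA-Z0-9_:-]` and percent-encode every other byte, UTF-8 bytes individually.

```go
package main

import (
	"fmt"
	"strings"
)

// escapeValue keeps [a-zA-Z0-9_:-] and percent-encodes every other byte.
func escapeValue(v string) string {
	var b strings.Builder
	for i := 0; i < len(v); i++ {
		c := v[i]
		switch {
		case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z',
			c >= '0' && c <= '9', c == '-', c == '_', c == ':':
			b.WriteByte(c)
		default:
			fmt.Fprintf(&b, "%%%02X", c) // e.g. '/' -> %2F, '.' -> %2E
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeValue("http://example.org:8080")) // http:%2F%2Fexample%2Eorg:8080
	fmt.Println(escapeValue("日"))                       // %E6%97%A5
}
```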

View file

@@ -51,16 +51,16 @@ type TagValue model.LabelValue
//
// Examples:
//
//	"foo-bar-42" -> "foo-bar-42"
//
//	"foo_bar_42" -> "foo__bar__42"
//
//	"http://example.org:8080" -> "http_.//example.org_.8080"
//
//	"Björn's email: bjoern@soundcloud.com" ->
//	"Bj_C3_B6rn_27s_20email_._20bjoern_40soundcloud.com"
//
//	"日" -> "_E6_97_A5"
func (tv TagValue) MarshalJSON() ([]byte, error) {
    length := len(tv)
    // Need at least two more bytes than in tv.

go.mod
View file

@@ -57,8 +57,8 @@ require (
    github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
    github.com/stretchr/testify v1.8.4
    github.com/vultr/govultr/v2 v2.17.2
-   go.opentelemetry.io/collector/pdata v1.0.0-rcv0016
-   go.opentelemetry.io/collector/semconv v0.87.0
+   go.opentelemetry.io/collector/pdata v1.0.0-rcv0017
+   go.opentelemetry.io/collector/semconv v0.88.0
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
    go.opentelemetry.io/otel v1.19.0
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
@@ -79,7 +79,7 @@ require (
    golang.org/x/tools v0.14.0
    google.golang.org/api v0.147.0
    google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a
-   google.golang.org/grpc v1.58.3
+   google.golang.org/grpc v1.59.0
    google.golang.org/protobuf v1.31.0
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.1
@@ -139,7 +139,7 @@ require (
    github.com/go-openapi/swag v0.22.4 // indirect
    github.com/go-openapi/validate v0.22.1 // indirect
    github.com/go-resty/resty/v2 v2.7.0 // indirect
-   github.com/golang/glog v1.1.0 // indirect
+   github.com/golang/glog v1.1.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.3 // indirect
    github.com/google/go-cmp v0.6.0 // indirect

go.sum
View file

@@ -285,8 +285,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
-github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -768,10 +768,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE=
-go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg=
-go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8=
-go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo=
+go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4=
+go.opentelemetry.io/collector/semconv v0.88.0 h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE=
+go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
@@ -1156,8 +1156,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View file

@@ -31,7 +31,7 @@ type BucketCount interface {
// absolute counts directly). Go type parameters don't allow type
// specialization. Therefore, where special treatment of deltas between buckets
// vs. absolute counts is important, this information has to be provided as a
-// separate boolean parameter "deltaBuckets"
+// separate boolean parameter "deltaBuckets".
type InternalBucketCount interface {
    float64 | int64
}

View file

@@ -450,16 +450,17 @@ func TestLabels_Get(t *testing.T) {
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
+//
//	name                                       old time/op    new time/op    delta
//	Labels_Get/with_5_labels/get_first_label   5.12ns ± 0%    14.24ns ± 0%   ~ (p=1.000 n=1+1)
//	Labels_Get/with_5_labels/get_middle_label  13.5ns ± 0%    18.5ns ± 0%    ~ (p=1.000 n=1+1)
//	Labels_Get/with_5_labels/get_last_label    21.9ns ± 0%    18.9ns ± 0%    ~ (p=1.000 n=1+1)
//	Labels_Get/with_10_labels/get_first_label  5.11ns ± 0%    19.47ns ± 0%   ~ (p=1.000 n=1+1)
//	Labels_Get/with_10_labels/get_middle_label 26.2ns ± 0%    19.3ns ± 0%    ~ (p=1.000 n=1+1)
//	Labels_Get/with_10_labels/get_last_label   42.8ns ± 0%    23.4ns ± 0%    ~ (p=1.000 n=1+1)
//	Labels_Get/with_30_labels/get_first_label  5.10ns ± 0%    24.63ns ± 0%   ~ (p=1.000 n=1+1)
//	Labels_Get/with_30_labels/get_middle_label 75.8ns ± 0%    29.7ns ± 0%    ~ (p=1.000 n=1+1)
//	Labels_Get/with_30_labels/get_last_label   169ns ± 0%     29ns ± 0%      ~ (p=1.000 n=1+1)
func BenchmarkLabels_Get(b *testing.B) {
    maxLabels := 30
    allLabels := make([]Label, maxLabels)

View file

@@ -329,7 +329,7 @@ func isCaseSensitive(reg *syntax.Regexp) bool {
    return !isCaseInsensitive(reg)
}

-// tooManyMatches guards against creating too many set matches
+// tooManyMatches guards against creating too many set matches.
func tooManyMatches(matches []string, added ...string) bool {
    return len(matches)+len(added) > maxSetMatches
}
@@ -351,7 +351,7 @@ func (m *FastRegexMatcher) GetRegexString() string {
// `literal1|literal2|literal3|...`
//
// this function returns an optimized StringMatcher or nil if the regex
-// cannot be optimized in this way, and a list of setMatches up to maxSetMatches
+// cannot be optimized in this way, and a list of setMatches up to maxSetMatches.
func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
    if len(s) == 0 {
        return emptyStringMatcher{}, nil
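The optimization named in that comment can be sketched as a standalone helper (hypothetical code for illustration, not the actual implementation): split the pattern on `|` and accept it only if every alternative is a plain literal and the count stays under the set-match limit.

```go
// splitPlainAlternation returns the literal alternatives of a pattern of the
// form `lit1|lit2|...`, or nil if an alternative contains regex metacharacters
// or the number of alternatives exceeds max.
func splitPlainAlternation(pattern string, max int) []string {
	parts := strings.Split(pattern, "|")
	if len(parts) > max {
		return nil
	}
	for _, p := range parts {
		if regexp.QuoteMeta(p) != p {
			return nil // not a plain literal
		}
	}
	return parts
}
```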

View file

@@ -754,6 +754,8 @@ func FuzzFastRegexMatcher_WithFuzzyRegularExpressions(f *testing.F) {
// running the following command:
//
// logcli --addr=XXX --username=YYY --password=ZZZ query '{namespace=~"(cortex|mimir).*",name="query-frontend"} |= "query stats" |= "=~" --limit=100000 > logs.txt
+//
+// against Loki.
func TestAnalyzeRealQueries(t *testing.T) {
    t.Skip("Decomment this test only to manually analyze real queries")
@@ -1157,7 +1159,7 @@ func TestFindEqualStringMatchers(t *testing.T) {
}

// This benchmark is used to find a good threshold to use to apply the optimization
-// done by optimizeEqualStringMatchers()
+// done by optimizeEqualStringMatchers().
func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
    randGenerator := rand.New(rand.NewSource(time.Now().UnixNano()))

View file

@@ -675,7 +675,7 @@ func durationMilliseconds(d time.Duration) int64 {
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) {
    prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
-   mint, maxt := ng.findMinMaxTime(s)
+   mint, maxt := FindMinMaxTime(s)
    querier, err := query.queryable.Querier(mint, maxt)
    if err != nil {
        prepareSpanTimer.Finish()
@@ -817,7 +817,10 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) {
    return subqOffset, subqRange, tsp
}

-func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
+// FindMinMaxTime returns the time in milliseconds of the earliest and latest point in time the statement will try to process.
+// This takes into account offsets, @ modifiers, and range selectors.
+// If the statement does not select series, then FindMinMaxTime returns (0, 0).
+func FindMinMaxTime(s *parser.EvalStmt) (int64, int64) {
    var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64
    // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
    // The evaluation of the VectorSelector inside then evaluates the given range and unsets
@@ -826,7 +829,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
    parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error {
        switch n := node.(type) {
        case *parser.VectorSelector:
-           start, end := ng.getTimeRangesForSelector(s, n, path, evalRange)
+           start, end := getTimeRangesForSelector(s, n, path, evalRange)
            if start < minTimestamp {
                minTimestamp = start
            }
@@ -849,7 +852,7 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) {
    return minTimestamp, maxTimestamp
}

-func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) {
+func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) {
    start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End)
    subqOffset, subqRange, subqTs := subqueryTimes(path)
@@ -906,7 +909,7 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
    parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error {
        switch n := node.(type) {
        case *parser.VectorSelector:
-           start, end := ng.getTimeRangesForSelector(s, n, path, evalRange)
+           start, end := getTimeRangesForSelector(s, n, path, evalRange)
            interval := ng.getLastSubqueryInterval(path)
            if interval == 0 {
                interval = s.Interval
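With `findMinMaxTime` now exported as `FindMinMaxTime`, callers outside the engine can compute the time window a query selects. A minimal sketch, assuming the exported `parser.EvalStmt` fields; the query string is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`rate(http_requests_total[5m] offset 10m)`)
	if err != nil {
		panic(err)
	}
	end := time.Now()
	stmt := &parser.EvalStmt{Expr: expr, Start: end.Add(-time.Hour), End: end}
	// mint/maxt are Unix milliseconds; the 5m range and 10m offset widen
	// the window beyond [Start, End].
	mint, maxt := promql.FindMinMaxTime(stmt)
	fmt.Println(mint, maxt)
}
```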

View file

@@ -786,47 +786,47 @@ func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
    return simpleFunc(vals, enh, math.Tan), nil
}

-// == asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Asin), nil
}

-// == acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Acos), nil
}

-// == atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Atan), nil
}

-// == sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Sinh), nil
}

-// == cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Cosh), nil
}

-// == tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Tanh), nil
}

-// == asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Asinh), nil
}

-// == acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Acosh), nil
}

-// == atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
+// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
    return simpleFunc(vals, enh, math.Atanh), nil
}

View file

@@ -59,11 +59,11 @@ func (i Item) Pretty(int) string { return i.String() }
func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }

// IsAggregator returns true if the Item belongs to the aggregator functions.
-// Returns false otherwise
+// Returns false otherwise.
func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }

// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
-// Returns false otherwise
+// Returns false otherwise.
func (i ItemType) IsAggregatorWithParam() bool {
    return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
}

View file

@@ -171,7 +171,7 @@ func ParseExpr(input string) (expr Expr, err error) {
    return p.ParseExpr()
}

-// ParseMetric parses the input into a metric
+// ParseMetric parses the input into a metric.
func ParseMetric(input string) (m labels.Labels, err error) {
    p := NewParser(input)
    defer p.Close()

View file

@@ -472,7 +472,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
}

// State returns the maximum state of alert instances for this rule.
-// StateFiring > StatePending > StateInactive
+// StateFiring > StatePending > StateInactive.
func (r *AlertingRule) State() AlertState {
    r.activeMtx.Lock()
    defer r.activeMtx.Unlock()

View file

@@ -265,7 +265,7 @@ type GroupLoader interface {
}

// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
-// and parser.ParseExpr
+// and parser.ParseExpr.
type FileLoader struct{}

func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {

View file

@@ -34,7 +34,7 @@ import (
    "github.com/prometheus/prometheus/util/osutil"
)

-// NewManager is the Manager constructor
+// NewManager is the Manager constructor.
func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
    if o == nil {
        o = &Options{}

View file

@@ -18,6 +18,7 @@ import (
    "bytes"
    "compress/gzip"
    "context"
+    "errors"
    "fmt"
    "io"
    "math"
@@ -30,7 +31,6 @@ import (
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
-    "github.com/pkg/errors"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
    "github.com/prometheus/common/version"
@@ -112,7 +112,7 @@ type scrapeLoopOptions struct {
const maxAheadTime = 10 * time.Minute

-// returning an empty label set is interpreted as "drop"
+// returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels

func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
@@ -122,7 +122,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
    if err != nil {
-        return nil, errors.Wrap(err, "error creating HTTP client")
+        return nil, fmt.Errorf("error creating HTTP client: %w", err)
    }

    buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
@@ -250,7 +250,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...)
    if err != nil {
        sp.metrics.targetScrapePoolReloadsFailed.Inc()
-        return errors.Wrap(err, "error creating HTTP client")
+        return fmt.Errorf("error creating HTTP client: %w", err)
    }

    reuseCache := reusableCache(sp.config, cfg)
@@ -695,7 +695,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w
    }()

    if resp.StatusCode != http.StatusOK {
-        return "", errors.Errorf("server returned HTTP status %s", resp.Status)
+        return "", fmt.Errorf("server returned HTTP status %s", resp.Status)
    }

    if s.bodySizeLimit <= 0 {
@@ -1549,7 +1549,7 @@ loop:
            }
            sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
            if err != nil {
-                if err != storage.ErrNotFound {
+                if !errors.Is(err, storage.ErrNotFound) {
                    level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
                }
                break loop
@@ -1620,8 +1620,8 @@ loop:
        sl.cache.forEachStale(func(lset labels.Labels) bool {
            // Series no longer exposed, mark it stale.
            _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
-            switch errors.Cause(err) {
-            case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+            switch {
+            case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
                // Do not count these in logging, as this is expected if a target
                // goes away and comes back again with a new scrape loop.
                err = nil
@@ -1635,35 +1635,35 @@ loop:
// Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample or bucket limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
-    switch errors.Cause(err) {
-    case nil:
+    switch {
+    case err == nil:
        if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
            sl.cache.trackStaleness(ce.hash, ce.lset)
        }
        return true, nil
-    case storage.ErrNotFound:
+    case errors.Is(err, storage.ErrNotFound):
        return false, storage.ErrNotFound
-    case storage.ErrOutOfOrderSample:
+    case errors.Is(err, storage.ErrOutOfOrderSample):
        appErrs.numOutOfOrder++
        level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
        sl.metrics.targetScrapeSampleOutOfOrder.Inc()
        return false, nil
-    case storage.ErrDuplicateSampleForTimestamp:
+    case errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
        appErrs.numDuplicates++
        level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
        sl.metrics.targetScrapeSampleDuplicate.Inc()
        return false, nil
-    case storage.ErrOutOfBounds:
+    case errors.Is(err, storage.ErrOutOfBounds):
        appErrs.numOutOfBounds++
        level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
        sl.metrics.targetScrapeSampleOutOfBounds.Inc()
        return false, nil
-    case errSampleLimit:
+    case errors.Is(err, errSampleLimit):
        // Keep on parsing output if we hit the limit, so we report the correct
        // total number of samples scraped.
        *sampleLimitErr = err
        return false, nil
-    case errBucketLimit:
+    case errors.Is(err, errBucketLimit):
        // Keep on parsing output if we hit the limit, so we report the correct
        // total number of samples scraped.
        *bucketLimitErr = err
@@ -1674,10 +1674,10 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e
}

func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error {
-    switch errors.Cause(err) {
-    case storage.ErrNotFound:
+    switch {
+    case errors.Is(err, storage.ErrNotFound):
        return storage.ErrNotFound
-    case storage.ErrOutOfOrderExemplar:
+    case errors.Is(err, storage.ErrOutOfOrderExemplar):
        appErrs.numExemplarOutOfOrder++
        level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
        sl.metrics.targetScrapeExemplarOutOfOrder.Inc()
@@ -1789,13 +1789,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v
    }

    ref, err := app.Append(ref, lset, t, v)
-    switch errors.Cause(err) {
-    case nil:
+    switch {
+    case err == nil:
        if !ok {
            sl.cache.addRef(s, ref, lset, lset.Hash())
        }
        return nil
-    case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+    case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
        // Do not log here, as this is expected if a target goes away and comes back
        // again with a new scrape loop.
        return nil
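Note on the pattern above: once `github.com/pkg/errors` is dropped and errors are wrapped with `fmt.Errorf("...: %w", err)`, a direct comparison or a `switch` on the error value no longer matches, so every check has to go through `errors.Is`, which walks the wrap chain. A minimal, self-contained sketch (not code from this commit) of the difference:

```go
package main

import (
	"errors"
	"fmt"
)

var errSampleLimit = errors.New("sample limit exceeded")

func main() {
	// Wrapping with %w keeps the original error reachable on the chain.
	wrapped := fmt.Errorf("error creating HTTP client: %w", errSampleLimit)

	// A direct comparison only matches the exact value, so it misses
	// wrapped errors.
	fmt.Println(wrapped == errSampleLimit) // false

	// errors.Is walks the wrap chain and still matches.
	fmt.Println(errors.Is(wrapped, errSampleLimit)) // true
}
```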

View file

@@ -18,6 +18,7 @@ import (
    "compress/gzip"
    "context"
    "encoding/binary"
+    "errors"
    "fmt"
    "io"
    "math"
@@ -31,7 +32,6 @@ import (
    "github.com/go-kit/log"
    "github.com/gogo/protobuf/proto"
-    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    dto "github.com/prometheus/client_model/go"
    config_util "github.com/prometheus/common/config"
@@ -882,7 +882,7 @@ func TestScrapeLoopRun(t *testing.T) {
    select {
    case err := <-errc:
-        if err != context.DeadlineExceeded {
+        if !errors.Is(err, context.DeadlineExceeded) {
            t.Fatalf("Expected timeout error but got: %s", err)
        }
    case <-time.After(3 * time.Second):
@@ -952,7 +952,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
    select {
    case err := <-errc:
-        if err != forcedErr {
+        if !errors.Is(err, forcedErr) {
            t.Fatalf("Expected forced error but got: %s", err)
        }
    case <-time.After(3 * time.Second):
@@ -1741,7 +1741,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
    now := time.Now()
    slApp := sl.appender(context.Background())
    total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now)
-    if err != errSampleLimit {
+    if !errors.Is(err, errSampleLimit) {
        t.Fatalf("Did not see expected sample limit error: %s", err)
    }
    require.NoError(t, slApp.Rollback())
@@ -1772,7 +1772,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
    now = time.Now()
    slApp = sl.appender(context.Background())
    total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "", now)
-    if err != errSampleLimit {
+    if !errors.Is(err, errSampleLimit) {
        t.Fatalf("Did not see expected sample limit error: %s", err)
    }
    require.NoError(t, slApp.Rollback())
@@ -1868,7 +1868,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
    now = time.Now()
    total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
-    if err != errBucketLimit {
+    if !errors.Is(err, errBucketLimit) {
        t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
    }
    require.NoError(t, app.Rollback())
@@ -2738,8 +2738,8 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
        switch {
        case err == nil:
            errc <- errors.New("Expected error but got nil")
-        case ctx.Err() != context.Canceled:
-            errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err())
+        case !errors.Is(ctx.Err(), context.Canceled):
+            errc <- fmt.Errorf("Expected context cancellation error but got: %w", ctx.Err())
        default:
            close(errc)
        }
@@ -3679,6 +3679,131 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
    require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel))
}

+// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels.
+func TestLeQuantileReLabel(t *testing.T) {
+    simpleStorage := teststorage.New(t)
+    defer simpleStorage.Close()
+
+    config := &config.ScrapeConfig{
+        JobName: "test",
+        MetricRelabelConfigs: []*relabel.Config{
+            {
+                SourceLabels: model.LabelNames{"le", "__name__"},
+                Regex:        relabel.MustNewRegexp("(\\d+)\\.0+;.*_bucket"),
+                Replacement:  relabel.DefaultRelabelConfig.Replacement,
+                Separator:    relabel.DefaultRelabelConfig.Separator,
+                TargetLabel:  "le",
+                Action:       relabel.Replace,
+            },
+            {
+                SourceLabels: model.LabelNames{"quantile"},
+                Regex:        relabel.MustNewRegexp("(\\d+)\\.0+"),
+                Replacement:  relabel.DefaultRelabelConfig.Replacement,
+                Separator:    relabel.DefaultRelabelConfig.Separator,
+                TargetLabel:  "quantile",
+                Action:       relabel.Replace,
+            },
+        },
+        SampleLimit:    100,
+        Scheme:         "http",
+        ScrapeInterval: model.Duration(100 * time.Millisecond),
+        ScrapeTimeout:  model.Duration(100 * time.Millisecond),
+    }
+
+    metricsText := `
+# HELP test_histogram This is a histogram with default buckets
+# TYPE test_histogram histogram
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="1.0"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="5.0"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="10.0"} 0
+test_histogram_bucket{address="0.0.0.0",port="5001",le="+Inf"} 0
+test_histogram_sum{address="0.0.0.0",port="5001"} 0
+test_histogram_count{address="0.0.0.0",port="5001"} 0
+# HELP test_summary Number of inflight requests sampled at a regular interval. Quantile buckets keep track of inflight requests over the last 60s.
+# TYPE test_summary summary
+test_summary{quantile="0.5"} 0
+test_summary{quantile="0.9"} 0
+test_summary{quantile="0.95"} 0
+test_summary{quantile="0.99"} 0
+test_summary{quantile="1.0"} 1
+test_summary_sum 1
+test_summary_count 199
+`
+
+    // The expected "le" values do not have the trailing ".0".
+    expectedLeValues := []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5", "10", "+Inf"}
+
+    // The expected "quantile" values do not have the trailing ".0".
+    expectedQuantileValues := []string{"0.5", "0.9", "0.95", "0.99", "1"}
+
+    scrapeCount := 0
+    scraped := make(chan bool)
+
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        fmt.Fprint(w, metricsText)
+        scrapeCount++
+        if scrapeCount > 2 {
+            close(scraped)
+        }
+    }))
+    defer ts.Close()
+
+    sp, err := newScrapePool(config, simpleStorage, 0, nil, &Options{}, newTestScrapeMetrics(t))
+    require.NoError(t, err)
+    defer sp.stop()
+
+    testURL, err := url.Parse(ts.URL)
+    require.NoError(t, err)
+    sp.Sync([]*targetgroup.Group{
+        {
+            Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
+        },
+    })
+    require.Equal(t, 1, len(sp.ActiveTargets()))
+
+    select {
+    case <-time.After(5 * time.Second):
+        t.Fatalf("target was not scraped")
+    case <-scraped:
+    }
+
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+    q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+    require.NoError(t, err)
+    defer q.Close()
+
+    checkValues := func(labelName string, expectedValues []string, series storage.SeriesSet) {
+        foundLeValues := map[string]bool{}
+
+        for series.Next() {
+            s := series.At()
+            v := s.Labels().Get(labelName)
+            require.NotContains(t, foundLeValues, v, "duplicate label value found")
+            foundLeValues[v] = true
+        }
+
+        require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected")
+        for _, v := range expectedValues {
+            require.Contains(t, foundLeValues, v, "label value not found")
+        }
+    }
+
+    series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_bucket"))
+    checkValues("le", expectedLeValues, series)
+
+    series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_summary"))
+    checkValues("quantile", expectedQuantileValues, series)
+}
+
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) {
    appender := &collectResultAppender{}
    var (
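The two relabel rules in `TestLeQuantileReLabel` above depend on the regex being anchored by the relabel package. A standalone sketch of the same `le` rewrite using only the standard library (the `^(?:...)$` anchoring mirrors what `relabel.NewRegexp` does; the values are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// relabel.NewRegexp anchors patterns as ^(?:...)$, so the rule only
	// fires when the joined source labels match in full.
	re := regexp.MustCompile(`^(?:(\d+)\.0+;.*_bucket)$`)

	// Source labels "le" and "__name__", joined by the default ";" separator.
	src := "5.0;test_histogram_bucket"
	if m := re.FindStringSubmatch(src); m != nil {
		fmt.Println(m[1]) // "5" - the default "$1" replacement written back to "le"
	}

	// "2.5;test_histogram_bucket" has no trailing zeros after the dot,
	// so it does not match and the label is left as-is.
}
```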

View file

@@ -196,7 +196,7 @@ func (t *Target) DiscoveredLabels() labels.Labels {
    return t.discoveredLabels.Copy()
}

-// SetDiscoveredLabels sets new DiscoveredLabels
+// SetDiscoveredLabels sets new DiscoveredLabels.
func (t *Target) SetDiscoveredLabels(l labels.Labels) {
    t.mtx.Lock()
    defer t.mtx.Unlock()

View file

@@ -330,7 +330,7 @@ func (s testSeriesSet) At() Series { return s.series }
func (s testSeriesSet) Err() error { return nil }
func (s testSeriesSet) Warnings() annotations.Annotations { return nil }

-// TestSeriesSet returns a mock series set
+// TestSeriesSet returns a mock series set.
func TestSeriesSet(series Series) SeriesSet {
    return testSeriesSet{series: series}
}

View file

@@ -43,7 +43,7 @@ const (
    IngestionPublicAudience = "https://monitor.azure.com//.default"
)

-// ManagedIdentityConfig is used to store managed identity config values
+// ManagedIdentityConfig is used to store managed identity config values.
type ManagedIdentityConfig struct {
    // ClientID is the clientId of the managed identity that is being used to authenticate.
    ClientID string `yaml:"client_id,omitempty"`
@@ -235,7 +235,7 @@ func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managed
    return azidentity.NewManagedIdentityCredential(opts)
}

-// newOAuthTokenCredential returns new OAuth token credential
+// newOAuthTokenCredential returns new OAuth token credential.
func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) {
    opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts}
    return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
@@ -326,7 +326,7 @@ func getAudience(cloud string) (string, error) {
    }
}

-// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds
+// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds.
func getCloudConfiguration(c string) (cloud.Configuration, error) {
    switch strings.ToLower(c) {
    case strings.ToLower(AzureChina):

View file

@@ -475,7 +475,7 @@ func (c *concreteSeriesIterator) At() (t int64, v float64) {
    return s.Timestamp, s.Value
}

-// AtHistogram implements chunkenc.Iterator
+// AtHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
    if c.curValType != chunkenc.ValHistogram {
        panic("iterator is not on an integer histogram sample")
@@ -484,7 +484,7 @@ func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
    return h.Timestamp, HistogramProtoToHistogram(h)
}

-// AtFloatHistogram implements chunkenc.Iterator
+// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
    switch c.curValType {
    case chunkenc.ValHistogram:
@@ -547,7 +547,7 @@ func (c *concreteSeriesIterator) Err() error {
}

// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
-// also making sure that there are no labels with duplicate names
+// also making sure that there are no labels with duplicate names.
func validateLabelsAndMetricName(ls []prompb.Label) error {
    for i, l := range ls {
        if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
@@ -752,7 +752,7 @@ func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
    return spans
}

-// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
+// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric.
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
    metric := make(model.Metric, len(labelPairs))
    for _, l := range labelPairs {

View file

@@ -8,13 +8,13 @@ import (
    "unicode"
)

-// Normalizes the specified label to follow Prometheus label names standard
+// Normalizes the specified label to follow Prometheus label names standard.
//
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
//
-// Labels that start with non-letter rune will be prefixed with "key_"
+// Labels that start with non-letter rune will be prefixed with "key_".
//
-// Exception is made for double-underscores which are allowed
+// Exception is made for double-underscores which are allowed.
func NormalizeLabel(label string) string {
    // Trivial case
    if len(label) == 0 {
@@ -32,7 +32,7 @@ func NormalizeLabel(label string) string {
    return label
}

-// Return '_' for anything non-alphanumeric
+// Return '_' for anything non-alphanumeric.
func sanitizeRune(r rune) rune {
    if unicode.IsLetter(r) || unicode.IsDigit(r) {
        return r
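Read together, the comments above describe roughly the following behavior; this is a self-contained sketch of those rules, not the package's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitize maps anything non-alphanumeric to '_', per the comment above.
func sanitize(r rune) rune {
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return r
	}
	return '_'
}

func normalizeLabel(label string) string {
	if len(label) == 0 {
		return label
	}
	label = strings.Map(sanitize, label)
	// Labels starting with a non-letter rune get the "key_" prefix,
	// with an exception for double underscores (e.g. "__name__").
	if !unicode.IsLetter(rune(label[0])) && !strings.HasPrefix(label, "__") {
		label = "key_" + label
	}
	return label
}

func main() {
	fmt.Println(normalizeLabel("0count"))   // key_0count
	fmt.Println(normalizeLabel("foo.bar"))  // foo_bar
	fmt.Println(normalizeLabel("__name__")) // __name__
}
```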

View file

@@ -10,7 +10,7 @@ import (
    "go.opentelemetry.io/collector/pdata/pmetric"
)

-// The map to translate OTLP units to Prometheus units
+// The map to translate OTLP units to Prometheus units.
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
@@ -57,8 +57,8 @@ var unitMap = map[string]string{
    "$": "dollars",
}

-// The map that translates the "per" unit
-// Example: s => per second (singular)
+// The map that translates the "per" unit.
+// Example: s => per second (singular).
var perUnitMap = map[string]string{
    "s": "second",
    "m": "minute",
@@ -69,7 +69,7 @@ var perUnitMap = map[string]string{
    "y": "year",
}

-// Build a Prometheus-compliant metric name for the specified metric
+// Build a Prometheus-compliant metric name for the specified metric.
//
// Metric name is prefixed with specified namespace and underscore (if any).
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
@@ -202,7 +202,7 @@ func removeSuffix(tokens []string, suffix string) []string {
    return tokens
}

-// Clean up specified string so it's Prometheus compliant
+// Clean up specified string so it's Prometheus compliant.
func CleanUpString(s string) string {
    return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_")
}
@@ -211,8 +211,8 @@ func RemovePromForbiddenRunes(s string) string {
    return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_")
}

-// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit
-// Returns the specified unit if not found in unitMap
+// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit.
+// Returns the specified unit if not found in unitMap.
func unitMapGetOrDefault(unit string) string {
    if promUnit, ok := unitMap[unit]; ok {
        return promUnit
@@ -220,8 +220,8 @@ func unitMapGetOrDefault(unit string) string {
    return unit
}

-// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit
-// Returns the specified unit if not found in perUnitMap
+// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit.
+// Returns the specified unit if not found in perUnitMap.
func perUnitMapGetOrDefault(perUnit string) string {
    if promPerUnit, ok := perUnitMap[perUnit]; ok {
        return promPerUnit
@@ -229,7 +229,7 @@ func perUnitMapGetOrDefault(perUnit string) string {
    return perUnit
}

-// Returns whether the slice contains the specified value
+// Returns whether the slice contains the specified value.
func contains(slice []string, value string) bool {
    for _, sliceEntry := range slice {
        if sliceEntry == value {
@@ -239,7 +239,7 @@ func contains(slice []string, value string) bool {
    return false
}

-// Remove the specified value from the slice
+// Remove the specified value from the slice.
func removeItem(slice []string, value string) []string {
    newSlice := make([]string, 0, len(slice))
    for _, sliceEntry := range slice {
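As a worked example of how the two lookup helpers compose for a rate-style UCUM unit, here is a self-contained sketch using only the map entries visible above (the `per_` joining is illustrative, not the package's exact output):

```go
package main

import (
	"fmt"
	"strings"
)

// Subsets of the maps shown in the diff above.
var unitMap = map[string]string{"$": "dollars"}
var perUnitMap = map[string]string{"s": "second", "m": "minute", "y": "year"}

// Both helpers fall back to the input when there is no mapping.
func unitMapGetOrDefault(unit string) string {
	if u, ok := unitMap[unit]; ok {
		return u
	}
	return unit
}

func perUnitMapGetOrDefault(perUnit string) string {
	if u, ok := perUnitMap[perUnit]; ok {
		return u
	}
	return perUnit
}

func main() {
	// A UCUM rate unit like "$/m" splits into a main unit and a "per" unit.
	parts := strings.SplitN("$/m", "/", 2)
	main := unitMapGetOrDefault(parts[0])            // "dollars"
	per := "per_" + perUnitMapGetOrDefault(parts[1]) // "per_minute"
	fmt.Println(main, per)                           // dollars per_minute
}
```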

View file

@@ -15,7 +15,7 @@ func init() {
    ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
}

-// Returns a new Metric of type "Gauge" with specified name and unit
+// Returns a new Metric of type "Gauge" with specified name and unit.
func createGauge(name, unit string) pmetric.Metric {
    gauge := ilm.Metrics().AppendEmpty()
    gauge.SetName(name)
@@ -24,7 +24,7 @@ func createGauge(name, unit string) pmetric.Metric {
    return gauge
}

-// Returns a new Metric of type Monotonic Sum with specified name and unit
+// Returns a new Metric of type Monotonic Sum with specified name and unit.
func createCounter(name, unit string) pmetric.Metric {
    counter := ilm.Metrics().AppendEmpty()
    counter.SetEmptySum().SetIsMonotonic(true)

View file

@@ -125,7 +125,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
}

// baseRemoteWriteConfig copy values from global Default Write config
-// to avoid change global state and cross impact test execution
+// to avoid change global state and cross impact test execution.
func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig {
    cfg := config.DefaultRemoteWriteConfig
    cfg.URL = &common_config.URL{
@@ -137,7 +137,7 @@ func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig {
}

// baseRemoteReadConfig copy values from global Default Read config
-// to avoid change global state and cross impact test execution
+// to avoid change global state and cross impact test execution.
func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
    cfg := config.DefaultRemoteReadConfig
    cfg.URL = &common_config.URL{

View file

@@ -284,7 +284,7 @@ loop:
// cover an entirely different set of buckets. The function returns the
// “forward” inserts to expand 'a' to also cover all the buckets exclusively
// covered by 'b', and it returns the “backward” inserts to expand 'b' to also
-// cover all the buckets exclusively covered by 'a'
+// cover all the buckets exclusively covered by 'a'.
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
    ai := newBucketIterator(a)
    bi := newBucketIterator(b)

View file

@@ -93,13 +93,14 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
//
// Example:
// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
+//
// | HeadChunkID value | refers to ... |
// |-------------------|----------------------------------------------------------------------------------------|
// | 0-6               | chunks that have been compacted to blocks, these won't return data for queries in Head |
// | 7-11              | memSeries.mmappedChunks[i] where i is 0 to 4.                                           |
// | 12                | *memChunk{next: nil}
// | 13                | *memChunk{next: ^}
// | 14                | memSeries.headChunks -> *memChunk{next: ^}
type HeadChunkID uint64

// BlockChunkRef refers to a chunk within a persisted block.
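The table is plain offset arithmetic around `firstChunkID`; a small sketch of the reverse lookup it implies, using the example values above (the `headChunkID` helper shown further down performs the forward direction):

```go
package main

import "fmt"

func main() {
	// From the example above: firstChunkID=7, mmappedChunks=[p5,p6,p7,p8,p9].
	const firstChunkID = 7

	// A HeadChunkID below firstChunkID has been compacted to a block;
	// otherwise it indexes into mmappedChunks (or the in-memory chunks beyond).
	id := uint64(9)
	if id < firstChunkID {
		fmt.Println("compacted to a block; not queryable in Head")
		return
	}
	pos := id - firstChunkID
	fmt.Println("mmappedChunks index:", pos) // 2 -> p7
}
```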
@@ -198,7 +199,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
    }, nil
}

-// PopulatedChunk creates a chunk populated with samples every second starting at minTime
+// PopulatedChunk creates a chunk populated with samples every second starting at minTime.
func PopulatedChunk(numSamples int, minTime int64) (Meta, error) {
    samples := make([]Sample, numSamples)
    for i := 0; i < numSamples; i++ {

View file

@@ -41,6 +41,7 @@ import (
    "github.com/prometheus/prometheus/tsdb/fileutil"
    "github.com/prometheus/prometheus/tsdb/index"
    "github.com/prometheus/prometheus/tsdb/tombstones"
+    "github.com/prometheus/prometheus/tsdb/tsdbutil"
    "github.com/prometheus/prometheus/tsdb/wlog"
)
@@ -1135,6 +1136,32 @@ func TestCompaction_populateBlock(t *testing.T) {
                },
            },
        },
+        {
+            // Regression test for populateWithDelChunkSeriesIterator failing to set minTime on chunks.
+            title:          "Populate from mixed type series and expect sample inside the interval only.",
+            compactMinTime: 1,
+            compactMaxTime: 11,
+            inputSeriesSamples: [][]seriesSamples{
+                {
+                    {
+                        lset: map[string]string{"a": "1"},
+                        chunks: [][]sample{
+                            {{t: 0, h: tsdbutil.GenerateTestHistogram(0)}, {t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
+                            {{t: 10, f: 1}, {t: 11, f: 2}},
+                        },
+                    },
+                },
+            },
+            expSeriesSamples: []seriesSamples{
+                {
+                    lset: map[string]string{"a": "1"},
+                    chunks: [][]sample{
+                        {{t: 1, h: tsdbutil.GenerateTestHistogram(1)}},
+                        {{t: 10, f: 1}},
+                    },
+                },
+            },
+        },
    } {
        t.Run(tc.title, func(t *testing.T) {
            blocks := make([]BlockReader, 0, len(tc.inputSeriesSamples))
@@ -1178,12 +1205,23 @@ func TestCompaction_populateBlock(t *testing.T) {
                    firstTs int64 = math.MaxInt64
                    s       sample
                )
-                for iter.Next() == chunkenc.ValFloat {
-                    s.t, s.f = iter.At()
+                for vt := iter.Next(); vt != chunkenc.ValNone; vt = iter.Next() {
+                    switch vt {
+                    case chunkenc.ValFloat:
+                        s.t, s.f = iter.At()
+                        samples = append(samples, s)
+                    case chunkenc.ValHistogram:
+                        s.t, s.h = iter.AtHistogram()
+                        samples = append(samples, s)
+                    case chunkenc.ValFloatHistogram:
+                        s.t, s.fh = iter.AtFloatHistogram()
+                        samples = append(samples, s)
+                    default:
+                        require.Fail(t, "unexpected value type")
+                    }
                    if firstTs == math.MaxInt64 {
                        firstTs = s.t
                    }
-                    samples = append(samples, s)
                }

                // Check if chunk has correct min, max times.

View file

@@ -4411,7 +4411,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
// TestOOOCompactionWithDisabledWriteLog tests the scenario where the TSDB is
// configured to not have wal and wbl but its able to compact both the in-order
-// and out-of-order head
+// and out-of-order head.
func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
    dir := t.TempDir()
    ctx := context.Background()

View file

@@ -1556,7 +1556,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
    return actualInOrderMint, minOOOTime, minMmapFile
}

-// Tombstones returns a new reader over the head's tombstones
+// Tombstones returns a new reader over the head's tombstones.
func (h *Head) Tombstones() (tombstones.Reader, error) {
    return h.tombstones, nil
}
@@ -2209,7 +2209,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
    return mint1 <= maxt2 && mint2 <= maxt1
}

-// mmappedChunk describes a head chunk on disk that has been mmapped
+// mmappedChunk describes a head chunk on disk that has been mmapped.
type mmappedChunk struct {
    ref        chunks.ChunkDiskMapperRef
    numSamples uint16

View file

@@ -231,7 +231,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
// headChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
-// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list
+// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list.
func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
    return chunks.HeadChunkID(pos) + s.firstChunkID
}
@@ -625,7 +625,7 @@ var _ chunkenc.Chunk = &boundedChunk{}
// boundedChunk is an implementation of chunkenc.Chunk that uses a
// boundedIterator that only iterates through samples which timestamps are
-// >= minT and <= maxT
+// >= minT and <= maxT.
type boundedChunk struct {
    chunkenc.Chunk
    minT int64
@@ -654,7 +654,7 @@ func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
var _ chunkenc.Iterator = &boundedIterator{}

// boundedIterator is an implementation of Iterator that only iterates through
-// samples which timestamps are >= minT and <= maxT
+// samples which timestamps are >= minT and <= maxT.
type boundedIterator struct {
    chunkenc.Iterator
    minT int64
@@ -700,7 +700,7 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
    return b.Iterator.Seek(t)
}

-// safeHeadChunk makes sure that the chunk can be accessed without a race condition
+// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
type safeHeadChunk struct {
    chunkenc.Chunk
    s *memSeries

View file

@@ -1487,7 +1487,7 @@ func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers ..
// LabelValues returns value tuples that exist for the given label name.
// It is not safe to use the return value beyond the lifetime of the byte slice
// passed into the Reader.
-// TODO(replay): Support filtering by matchers
+// TODO(replay): Support filtering by matchers.
func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
    if len(matchers) > 0 {
        return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
@@ -1799,7 +1799,7 @@ func (r *Reader) Size() int64 {
}

// LabelNames returns all the unique label names present in the index.
-// TODO(twilkie) implement support for matchers
+// TODO(twilkie) implement support for matchers.
func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) {
    if len(matchers) > 0 {
        return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)

View file

@@ -39,7 +39,7 @@ type chunkInterval struct {
    maxt int64
}

-// permutateChunkIntervals returns all possible orders of the given chunkIntervals
+// permutateChunkIntervals returns all possible orders of the given chunkIntervals.
func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval {
    if left == right {
        inCopy := make([]chunkInterval, len(in))
@@ -871,9 +871,9 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
// the response is consistent with the data seen by Series() even if the OOO
// head receives more samples before Chunks() is called.
// An example:
// - Response A comes from: Series() then Chunk()
// - Response B comes from : Series(), in parallel new samples added to the head, then Chunk()
// - A == B
func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(t *testing.T) {
    opts := DefaultOptions()
    opts.OutOfOrderCapMax = 5

View file

@@ -21,7 +21,7 @@ import (
const testMaxSize int = 32

-// Formulas chosen to make testing easy:
+// Formulas chosen to make testing easy.
func valEven(pos int) int { return pos*2 + 2 } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int { return pos*2 + 1 }  // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals.

View file

@@ -22,7 +22,7 @@ const (
    DefaultPostingsForMatchersCacheForce = false
)

-// IndexPostingsReader is a subset of IndexReader methods, the minimum required to evaluate PostingsForMatchers
+// IndexPostingsReader is a subset of IndexReader methods, the minimum required to evaluate PostingsForMatchers.
type IndexPostingsReader interface {
    // LabelValues returns possible label values which may not be sorted.
    LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
@@ -166,7 +166,7 @@ func (c *PostingsForMatchersCache) expire() {
// shouldEvictHead returns true if cache head should be evicted, either because it's too old,
// or because the cache has too many elements
-// should be called while read lock is held on cachedMtx
+// should be called while read lock is held on cachedMtx.
func (c *PostingsForMatchersCache) shouldEvictHead() bool {
    // The cache should be evicted for sure if the max size (either items or bytes) is reached.
    if c.cached.Len() > c.maxItems || c.cachedBytes > c.maxBytes {
@@ -208,9 +208,9 @@ func (c *PostingsForMatchersCache) created(key string, ts time.Time, sizeBytes i
    c.cachedBytes += sizeBytes
}

-// matchersKey provides a unique string key for the given matchers slice
+// matchersKey provides a unique string key for the given matchers slice.
// NOTE: different orders of matchers will produce different keys,
-// but it's unlikely that we'll receive same matchers in different orders at the same time
+// but it's unlikely that we'll receive same matchers in different orders at the same time.
func matchersKey(ms []*labels.Matcher) string {
    const (
        typeLen = 2
@@ -232,7 +232,7 @@ func matchersKey(ms []*labels.Matcher) string {
    return key
}

-// indexReaderWithPostingsForMatchers adapts an index.Reader to be an IndexReader by adding the PostingsForMatchers method
+// indexReaderWithPostingsForMatchers adapts an index.Reader to be an IndexReader by adding the PostingsForMatchers method.
type indexReaderWithPostingsForMatchers struct {
    *index.Reader
    pfmc *PostingsForMatchersCache

View file

@@ -430,13 +430,13 @@ func (idx indexForPostingsMock) Postings(context.Context, string, ...string) (in
}

// timeNowMock offers a mockable time.Now() implementation
-// empty value is ready to be used, and it should not be copied (use a reference)
+// empty value is ready to be used, and it should not be copied (use a reference).
type timeNowMock struct {
    sync.Mutex
    now time.Time
}

-// timeNow can be used as a mocked replacement for time.Now()
+// timeNow can be used as a mocked replacement for time.Now().
func (t *timeNowMock) timeNow() time.Time {
    t.Lock()
    defer t.Unlock()
@@ -446,7 +446,7 @@ func (t *timeNowMock) timeNow() time.Time {
    return t.now
}

-// advance advances the mocked time.Now() value
+// advance advances the mocked time.Now() value.
func (t *timeNowMock) advance(d time.Duration) {
    t.Lock()
    defer t.Unlock()
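A sketch of how such a mock is typically used (the bodies below are assumed behavior inferred from the comments, not the test file's actual code; `advance` is taken to add `d` to the stored time):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// A self-contained re-sketch of timeNowMock with assumed initialization.
type timeNowMock struct {
	sync.Mutex
	now time.Time
}

func (t *timeNowMock) timeNow() time.Time {
	t.Lock()
	defer t.Unlock()
	if t.now.IsZero() {
		t.now = time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) // arbitrary fixed start
	}
	return t.now
}

func (t *timeNowMock) advance(d time.Duration) {
	t.Lock()
	defer t.Unlock()
	t.now = t.now.Add(d)
}

func main() {
	clock := &timeNowMock{} // zero value is ready to use; pass by reference
	t0 := clock.timeNow()
	clock.advance(10 * time.Minute)
	fmt.Println(clock.timeNow().Sub(t0)) // 10m0s, deterministically
}
```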

View file

@@ -897,6 +897,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        }
        return false
    }
+    p.curr.MinTime = p.currDelIter.AtT()

    // Re-encode the chunk if iterator is provider. This means that it has
    // some samples to be deleted or chunk is opened.
@@ -912,7 +913,6 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        if app, err = newChunk.Appender(); err != nil {
            break
        }
-
        for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
            if vt != chunkenc.ValHistogram {
                err = fmt.Errorf("found value type %v in histogram chunk", vt)
@@ -930,15 +930,12 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        if app, err = newChunk.Appender(); err != nil {
            break
        }
-        var v float64
-        t, v = p.currDelIter.At()
-        p.curr.MinTime = t
-        app.Append(t, v)
-        for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
+        for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
            if vt != chunkenc.ValFloat {
                err = fmt.Errorf("found value type %v in float chunk", vt)
                break
            }
+            var v float64
            t, v = p.currDelIter.At()
            app.Append(t, v)
        }
@@ -947,7 +944,6 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
        if app, err = newChunk.Appender(); err != nil {
            break
        }
-
        for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
            if vt != chunkenc.ValFloatHistogram {
                err = fmt.Errorf("found value type %v in histogram chunk", vt)
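The essence of the fix above is that `MinTime` is now taken from the first surviving sample via `p.currDelIter.AtT()`, once, before the per-encoding loops run, so histogram and float-histogram chunks get a correct minimum time as well, not only float chunks. A schematic, self-contained sketch of that control flow (hypothetical types, not the iterator's real API):

```go
package main

import "fmt"

type valType int

const (
	valNone valType = iota
	valFloat
	valHistogram
)

type sampleStub struct {
	t  int64
	vt valType
}

// reencode sketches the fixed flow: the re-encoded chunk's MinTime comes from
// the first surviving sample's timestamp, set before branching on value type.
func reencode(samples []sampleStub) (minTime int64) {
	if len(samples) == 0 {
		return 0
	}
	minTime = samples[0].t // p.curr.MinTime = p.currDelIter.AtT() in the fix
	for _, s := range samples {
		switch s.vt {
		case valFloat:
			// app.Append(...) would go here.
		case valHistogram:
			// app.AppendHistogram(...) would go here.
		}
	}
	return minTime
}

func main() {
	// Previously MinTime was assigned only in the float branch, so a chunk of
	// histogram samples ended up with an unset MinTime; now it is 10.
	fmt.Println(reencode([]sampleStub{{t: 10, vt: valHistogram}, {t: 11, vt: valHistogram}}))
}
```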

View file

@ -100,8 +100,8 @@ type seriesSamples struct {
chunks [][]sample chunks [][]sample
} }
// Index: labels -> postings -> chunkMetas -> chunkRef // Index: labels -> postings -> chunkMetas -> chunkRef.
// ChunkReader: ref -> vals // ChunkReader: ref -> vals.
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) { func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
sort.Slice(tc, func(i, j int) bool { sort.Slice(tc, func(i, j int) bool {
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0 return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0
@ -132,12 +132,35 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
Ref: chunkRef, Ref: chunkRef,
}) })
Before:
	chunk := chunkenc.NewXORChunk()
	app, _ := chunk.Appender()
	for _, smpl := range chk {
		app.Append(smpl.t, smpl.f)
	}
	chkReader[chunkRef] = chunk
After:
	switch {
	case chk[0].fh != nil:
		chunk := chunkenc.NewFloatHistogramChunk()
		app, _ := chunk.Appender()
		for _, smpl := range chk {
			require.NotNil(t, smpl.fh, "chunk can only contain one type of sample")
			_, _, _, err := app.AppendFloatHistogram(nil, smpl.t, smpl.fh, true)
			require.NoError(t, err, "chunk should be appendable")
		}
		chkReader[chunkRef] = chunk
	case chk[0].h != nil:
		chunk := chunkenc.NewHistogramChunk()
		app, _ := chunk.Appender()
		for _, smpl := range chk {
			require.NotNil(t, smpl.h, "chunk can only contain one type of sample")
			_, _, _, err := app.AppendHistogram(nil, smpl.t, smpl.h, true)
			require.NoError(t, err, "chunk should be appendable")
		}
		chkReader[chunkRef] = chunk
	default:
		chunk := chunkenc.NewXORChunk()
		app, _ := chunk.Appender()
		for _, smpl := range chk {
			require.Nil(t, smpl.h, "chunk can only contain one type of sample")
			require.Nil(t, smpl.fh, "chunk can only contain one type of sample")
			app.Append(smpl.t, smpl.f)
		}
		chkReader[chunkRef] = chunk
	}
chunkRef++ chunkRef++
} }
ls := labels.FromMap(s.lset) ls := labels.FromMap(s.lset)
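
For reference, the chunkenc appender/iterator calls used in the switch above can be exercised on their own. A small sketch of the float (XOR) case, assuming the Prometheus module path:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	chunk := chunkenc.NewXORChunk()
	app, err := chunk.Appender()
	if err != nil {
		panic(err)
	}
	// Append a few float samples, as the default branch above does.
	for _, s := range []struct {
		t int64
		v float64
	}{{1, 2}, {2, 3}, {3, 5}} {
		app.Append(s.t, s.v)
	}
	// Read them back through the iterator.
	it := chunk.Iterator(nil)
	for it.Next() == chunkenc.ValFloat {
		t, v := it.At()
		fmt.Println(t, v)
	}
}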
@ -733,12 +756,16 @@ func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
} }
func TestPopulateWithTombSeriesIterators(t *testing.T) { func TestPopulateWithTombSeriesIterators(t *testing.T) {
type minMaxTimes struct {
minTime, maxTime int64
}
cases := []struct { cases := []struct {
name string name string
chks [][]chunks.Sample chks [][]chunks.Sample
expected []chunks.Sample expected []chunks.Sample
expectedChks []chunks.Meta expectedChks []chunks.Meta
expectedMinMaxTimes []minMaxTimes
intervals tombstones.Intervals intervals tombstones.Intervals
@ -757,6 +784,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
expectedChks: []chunks.Meta{ expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{}), assureChunkFromSamples(t, []chunks.Sample{}),
}, },
expectedMinMaxTimes: []minMaxTimes{{0, 0}},
}, },
{ {
name: "three empty chunks", // This should never happen. name: "three empty chunks", // This should never happen.
@ -767,6 +795,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
assureChunkFromSamples(t, []chunks.Sample{}), assureChunkFromSamples(t, []chunks.Sample{}),
assureChunkFromSamples(t, []chunks.Sample{}), assureChunkFromSamples(t, []chunks.Sample{}),
}, },
expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}},
}, },
{ {
name: "one chunk", name: "one chunk",
@ -782,6 +811,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
}, },
{ {
name: "two full chunks", name: "two full chunks",
@ -801,6 +831,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
}, },
{ {
name: "three full chunks", name: "three full chunks",
@ -824,6 +855,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}, sample{10, 22, nil, nil}, sample{203, 3493, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
}, },
// Seek cases. // Seek cases.
{ {
@ -894,6 +926,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{7, 89, nil, nil}, sample{7, 89, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{3, 6}, {7, 7}},
}, },
{ {
name: "two chunks with trimmed middle sample of first chunk", name: "two chunks with trimmed middle sample of first chunk",
@ -914,6 +947,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{7, 89, nil, nil}, sample{9, 8, nil, nil}, sample{7, 89, nil, nil}, sample{9, 8, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}},
}, },
{ {
name: "two chunks with deletion across two chunks", name: "two chunks with deletion across two chunks",
@ -934,6 +968,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{9, 8, nil, nil}, sample{9, 8, nil, nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 3}, {9, 9}},
}, },
// Deletion with seek. // Deletion with seek.
{ {
@ -974,9 +1009,33 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil}, sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
}, },
{ {
name: "one histogram chunk intersect with deletion interval", name: "one histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
sample{6, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
},
{
name: "one histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{ chks: [][]chunks.Sample{
{ {
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
@ -998,6 +1057,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil}, sample{3, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(3)), nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
}, },
{ {
name: "one float histogram chunk", name: "one float histogram chunk",
@ -1023,9 +1083,33 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))}, sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
}, },
{ {
name: "one float histogram chunk intersect with deletion interval", name: "one float histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
sample{6, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
},
{
name: "one float histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{ chks: [][]chunks.Sample{
{ {
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
@ -1047,6 +1131,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))}, sample{3, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(3))},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
}, },
{ {
name: "one gauge histogram chunk", name: "one gauge histogram chunk",
@ -1072,9 +1157,33 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
}, },
{ {
name: "one gauge histogram chunk intersect with deletion interval", name: "one gauge histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
sample{6, 0, tsdbutil.GenerateTestGaugeHistogram(6), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
},
{
name: "one gauge histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{ chks: [][]chunks.Sample{
{ {
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil}, sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
@ -1096,6 +1205,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil}, sample{3, 0, tsdbutil.GenerateTestGaugeHistogram(3), nil},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
}, },
{ {
name: "one gauge float histogram", name: "one gauge float histogram",
@ -1121,9 +1231,33 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 6}},
}, },
{ {
name: "one gauge float histogram chunk intersect with deletion interval", name: "one gauge float histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
},
intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}},
expected: []chunks.Sample{
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
sample{6, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(6)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{3, 6}},
},
{
name: "one gauge float histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{ chks: [][]chunks.Sample{
{ {
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)}, sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
@ -1145,6 +1279,134 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)}, sample{3, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3)},
}), }),
}, },
expectedMinMaxTimes: []minMaxTimes{{1, 3}},
},
{
name: "three full mixed chunks",
chks: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
{
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{1, 6}, {7, 9}, {10, 203}},
},
{
name: "three full mixed chunks in different order",
chks: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
{
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 9}, {11, 16}, {100, 203}},
},
{
name: "three full mixed chunks in different order intersect with deletion interval",
chks: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
{
sample{100, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
intervals: tombstones.Intervals{{Mint: 8, Maxt: 11}, {Mint: 15, Maxt: 150}},
expected: []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{12, 3, nil, nil}, sample{13, 5, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 7}, {12, 13}, {203, 203}},
},
{
name: "three full mixed chunks overlapping",
chks: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
},
{sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}},
{
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
},
expected: []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil}, sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil}, sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil}, sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)}, sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{11, 2, nil, nil}, sample{12, 3, nil, nil}, sample{13, 5, nil, nil}, sample{16, 1, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{10, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(22)},
sample{203, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(3493)},
}),
},
expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}},
}, },
} }
for _, tc := range cases { for _, tc := range cases {
@ -1186,6 +1448,11 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
rmChunkRefs(expandedResult) rmChunkRefs(expandedResult)
rmChunkRefs(tc.expectedChks) rmChunkRefs(tc.expectedChks)
require.Equal(t, tc.expectedChks, expandedResult) require.Equal(t, tc.expectedChks, expandedResult)
for i, meta := range expandedResult {
require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime)
require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime)
}
}) })
}) })
} }
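
A quick way to read the new expectedMinMaxTimes assertions: a trimmed or re-encoded chunk's bounds are simply the timestamps of its first and last surviving samples. A tiny, dependency-free sketch of that rule (illustrative types, not the tsdb ones):

package main

import "fmt"

type sample struct{ t int64 }

// minMaxOf returns the time bounds a re-encoded chunk should carry:
// the timestamps of its first and last surviving samples.
func minMaxOf(samples []sample) (minT, maxT int64) {
	if len(samples) == 0 {
		return 0, 0
	}
	return samples[0].t, samples[len(samples)-1].t
}

func main() {
	minT, maxT := minMaxOf([]sample{{3}, {6}})
	fmt.Println(minT, maxT) // 3 6, matching expectedMinMaxTimes {3, 6}
}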
@ -1197,67 +1464,213 @@ func rmChunkRefs(chks []chunks.Meta) {
} }
} }
func checkCurrVal(t *testing.T, valType chunkenc.ValueType, it *populateWithDelSeriesIterator, expectedTs, expectedValue int) {
switch valType {
case chunkenc.ValFloat:
ts, v := it.At()
require.Equal(t, int64(expectedTs), ts)
require.Equal(t, float64(expectedValue), v)
case chunkenc.ValHistogram:
ts, h := it.AtHistogram()
require.Equal(t, int64(expectedTs), ts)
h.CounterResetHint = histogram.UnknownCounterReset
require.Equal(t, tsdbutil.GenerateTestHistogram(expectedValue), h)
case chunkenc.ValFloatHistogram:
ts, h := it.AtFloatHistogram()
require.Equal(t, int64(expectedTs), ts)
h.CounterResetHint = histogram.UnknownCounterReset
require.Equal(t, tsdbutil.GenerateTestFloatHistogram(expectedValue), h)
default:
panic("unexpected value type")
}
}
// Regression for: https://github.com/prometheus/tsdb/pull/97 // Regression for: https://github.com/prometheus/tsdb/pull/97
func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) { func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
Before:
	f, chkMetas := createFakeReaderAndNotPopulatedChunks(
		[]chunks.Sample{},
		[]chunks.Sample{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
		[]chunks.Sample{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
	)
	it := &populateWithDelSeriesIterator{}
	it.reset(ulid.ULID{}, f, chkMetas, nil)
	require.Equal(t, chunkenc.ValFloat, it.Seek(1))
	require.Equal(t, chunkenc.ValFloat, it.Seek(2))
	require.Equal(t, chunkenc.ValFloat, it.Seek(2))
	ts, v := it.At()
	require.Equal(t, int64(2), ts)
	require.Equal(t, float64(2), v)
After:
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 1, nil, nil}, sample{2, 2, nil, nil}, sample{3, 3, nil, nil}},
				{sample{4, 4, nil, nil}, sample{5, 5, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil}, sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(3), nil}},
				{sample{4, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(5), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)}, sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(3)}},
				{sample{4, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			require.Equal(t, tc.valType, it.Seek(1))
			require.Equal(t, tc.valType, it.Seek(2))
			require.Equal(t, tc.valType, it.Seek(2))
			checkCurrVal(t, tc.valType, it, 2, 2)
			require.Equal(t, int64(0), chkMetas[0].MinTime)
			require.Equal(t, int64(1), chkMetas[1].MinTime)
			require.Equal(t, int64(4), chkMetas[2].MinTime)
		})
	}
} }
// Regression when seeked chunks were still found via binary search and we always // Regression when seeked chunks were still found via binary search and we always
// skipped to the end when seeking a value in the current chunk. // skipped to the end when seeking a value in the current chunk.
func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) { func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
Before:
	f, chkMetas := createFakeReaderAndNotPopulatedChunks(
		[]chunks.Sample{},
		[]chunks.Sample{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
		[]chunks.Sample{},
	)
	it := &populateWithDelSeriesIterator{}
	it.reset(ulid.ULID{}, f, chkMetas, nil)
	require.Equal(t, chunkenc.ValFloat, it.Next())
	ts, v := it.At()
	require.Equal(t, int64(1), ts)
	require.Equal(t, float64(2), v)
	require.Equal(t, chunkenc.ValFloat, it.Seek(4))
	ts, v = it.At()
	require.Equal(t, int64(5), ts)
	require.Equal(t, float64(6), v)
After:
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 2, nil, nil}, sample{3, 4, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
				{},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, tsdbutil.GenerateTestHistogram(2), nil}, sample{3, 0, tsdbutil.GenerateTestHistogram(4), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
				{},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{},
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)}, sample{3, 0, nil, tsdbutil.GenerateTestFloatHistogram(4)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
				{},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			require.Equal(t, tc.valType, it.Next())
			checkCurrVal(t, tc.valType, it, 1, 2)
			require.Equal(t, tc.valType, it.Seek(4))
			checkCurrVal(t, tc.valType, it, 5, 6)
			require.Equal(t, int64(0), chkMetas[0].MinTime)
			require.Equal(t, int64(1), chkMetas[1].MinTime)
			require.Equal(t, int64(0), chkMetas[2].MinTime)
		})
	}
} }
func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) { func TestPopulateWithDelSeriesIterator_SeekWithMinTime(t *testing.T) {
Before:
	f, chkMetas := createFakeReaderAndNotPopulatedChunks(
		[]chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
	)
	it := &populateWithDelSeriesIterator{}
	it.reset(ulid.ULID{}, f, chkMetas, nil)
	require.Equal(t, chunkenc.ValNone, it.Seek(7))
	require.Equal(t, chunkenc.ValFloat, it.Seek(3))
After:
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{6, 8, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{6, 0, tsdbutil.GenerateTestHistogram(8), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{6, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, nil)
			require.Equal(t, chunkenc.ValNone, it.Seek(7))
			require.Equal(t, tc.valType, it.Seek(3))
			require.Equal(t, int64(1), chkMetas[0].MinTime)
		})
	}
} }
// Regression when calling Next() with a time bounded to fit within two samples. // Regression when calling Next() with a time bounded to fit within two samples.
// Seek gets called and advances beyond the max time, which was just accepted as a valid sample. // Seek gets called and advances beyond the max time, which was just accepted as a valid sample.
func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) { func TestPopulateWithDelSeriesIterator_NextWithMinTime(t *testing.T) {
Before:
	f, chkMetas := createFakeReaderAndNotPopulatedChunks(
		[]chunks.Sample{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
	)
	it := &populateWithDelSeriesIterator{}
	it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}))
	require.Equal(t, chunkenc.ValNone, it.Next())
After:
	cases := []struct {
		name    string
		valType chunkenc.ValueType
		chks    [][]chunks.Sample
	}{
		{
			name:    "float",
			valType: chunkenc.ValFloat,
			chks: [][]chunks.Sample{
				{sample{1, 6, nil, nil}, sample{5, 6, nil, nil}, sample{7, 8, nil, nil}},
			},
		},
		{
			name:    "histogram",
			valType: chunkenc.ValHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{5, 0, tsdbutil.GenerateTestHistogram(6), nil}, sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil}},
			},
		},
		{
			name:    "float histogram",
			valType: chunkenc.ValFloatHistogram,
			chks: [][]chunks.Sample{
				{sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{5, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)}, sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)}},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
			it := &populateWithDelSeriesIterator{}
			it.reset(ulid.ULID{}, f, chkMetas, tombstones.Intervals{{Mint: math.MinInt64, Maxt: 2}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}))
			require.Equal(t, chunkenc.ValNone, it.Next())
			require.Equal(t, int64(1), chkMetas[0].MinTime)
		})
	}
} }
// Test the cost of merging series sets for different number of merged sets and their size. // Test the cost of merging series sets for different number of merged sets and their size.

View file

@ -174,7 +174,7 @@ func (r *LiveReader) Record() []byte {
// Rebuild a full record from potentially partial records. Returns false // Rebuild a full record from potentially partial records. Returns false
// if there was an error or if we weren't able to read a record for any reason. // if there was an error or if we weren't able to read a record for any reason.
// Returns true if we read a full record. Any record data is appended to // Returns true if we read a full record. Any record data is appended to
// LiveReader.rec // LiveReader.rec.
func (r *LiveReader) buildRecord() (bool, error) { func (r *LiveReader) buildRecord() (bool, error) {
for { for {
// Check that we have data in the internal buffer to read. // Check that we have data in the internal buffer to read.

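Stripped of the buffering and I/O, buildRecord's contract is to concatenate fragment payloads until a full record is assembled. A rough, dependency-free sketch of that idea (the fragment type names here are assumed for illustration, not necessarily the wlog constants):

package main

import (
	"errors"
	"fmt"
)

type fragType int

const (
	fragFull   fragType = iota // a complete record in one piece
	fragFirst                  // first fragment of a split record
	fragMiddle                 // interior fragment
	fragLast                   // final fragment
)

type fragment struct {
	typ  fragType
	data []byte
}

// buildRecord appends fragment payloads to rec until a record is complete,
// returning the assembled record once a full/last fragment is seen.
func buildRecord(frags []fragment) ([]byte, error) {
	var rec []byte
	for _, f := range frags {
		rec = append(rec, f.data...)
		switch f.typ {
		case fragFull, fragLast:
			return rec, nil // record is complete
		case fragFirst, fragMiddle:
			continue // keep accumulating
		}
	}
	return nil, errors.New("torn record: ran out of fragments")
}

func main() {
	rec, err := buildRecord([]fragment{
		{fragFirst, []byte("hel")},
		{fragMiddle, []byte("lo ")},
		{fragLast, []byte("wal")},
	})
	fmt.Println(string(rec), err)
}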
View file

@ -622,7 +622,8 @@ func (w *WL) flushPage(clear bool) error {
} }
// First Byte of header format: // First Byte of header format:
//
// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ]
const ( const (
snappyMask = 1 << 3 snappyMask = 1 << 3
zstdMask = 1 << 4 zstdMask = 1 << 4
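
The bit layout described in the comment can be made concrete with a few masks. In this sketch only snappyMask and zstdMask come from the hunk above; recTypeMask is an assumed name for the low three bits:

package main

import "fmt"

const (
	snappyMask  = 1 << 3          // as in the hunk above
	zstdMask    = 1 << 4          // as in the hunk above
	recTypeMask = snappyMask - 1  // low 3 bits: record type (assumed name)
)

func main() {
	// Encode: record type 2 with the snappy flag set.
	header := byte(2) | snappyMask

	// Decode the pieces back out of the first byte.
	fmt.Println("type:  ", header&recTypeMask)     // 2
	fmt.Println("snappy:", header&snappyMask != 0) // true
	fmt.Println("zstd:  ", header&zstdMask != 0)   // false
}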
@ -836,7 +837,7 @@ func (w *WL) fsync(f *Segment) error {
// Sync forces a file sync on the current write log segment. This function is meant
// to be used only in tests due to different behaviour on operating systems
// like Windows and Linux.
func (w *WL) Sync() error { func (w *WL) Sync() error {
return w.fsync(w.segment) return w.fsync(w.segment)
} }

View file

@ -21,7 +21,7 @@ import (
"syscall" "syscall"
) )
// Statfs returns the file system type (Unix only) // Statfs returns the file system type (Unix only).
func Statfs(path string) string { func Statfs(path string) string {
// Types of file systems that may be returned by `statfs` // Types of file systems that may be returned by `statfs`
fsTypes := map[int64]string{ fsTypes := map[int64]string{

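On Linux, the underlying call is syscall.Statfs, whose Type field carries the filesystem magic number that a map like fsTypes translates. A Linux-only sketch (the ext4 magic value 0xef53 is a well-known kernel constant):

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var fs syscall.Statfs_t
	if err := syscall.Statfs("/", &fs); err != nil {
		panic(err)
	}
	// fs.Type is the magic number a map like fsTypes above would translate,
	// e.g. 0xef53 for ext4.
	fmt.Printf("filesystem magic: %#x\n", fs.Type)
}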
View file

@ -15,18 +15,18 @@ package testutil
import "time" import "time"
// A MockContext provides a simple stub implementation of a Context // A MockContext provides a simple stub implementation of a Context.
type MockContext struct { type MockContext struct {
Error error Error error
DoneCh chan struct{} DoneCh chan struct{}
} }
// Deadline always will return not set // Deadline always will return not set.
func (c *MockContext) Deadline() (deadline time.Time, ok bool) { func (c *MockContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false return time.Time{}, false
} }
// Done returns a read channel for listening to the Done event // Done returns a read channel for listening to the Done event.
func (c *MockContext) Done() <-chan struct{} { func (c *MockContext) Done() <-chan struct{} {
return c.DoneCh return c.DoneCh
} }
@ -36,7 +36,7 @@ func (c *MockContext) Err() error {
return c.Error return c.Error
} }
// Value ignores the Value and always returns nil // Value ignores the Value and always returns nil.
func (c *MockContext) Value(interface{}) interface{} { func (c *MockContext) Value(interface{}) interface{} {
return nil return nil
} }

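A sketch of how such a stub can drive context-aware code in tests; the struct is redefined here so the example is self-contained:

package main

import (
	"errors"
	"fmt"
	"time"
)

// MockContext mirrors the stub above: it satisfies context.Context
// with fixed answers, so tests can trigger cancellation by hand.
type MockContext struct {
	Error  error
	DoneCh chan struct{}
}

func (c *MockContext) Deadline() (time.Time, bool)   { return time.Time{}, false }
func (c *MockContext) Done() <-chan struct{}         { return c.DoneCh }
func (c *MockContext) Err() error                    { return c.Error }
func (c *MockContext) Value(interface{}) interface{} { return nil }

func main() {
	ctx := &MockContext{Error: errors.New("canceled by test"), DoneCh: make(chan struct{})}
	go close(ctx.DoneCh) // simulate cancellation
	<-ctx.Done()
	fmt.Println(ctx.Err())
}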
View file

@ -33,7 +33,7 @@ const (
// NilCloser is a no-op Closer. // NilCloser is a no-op Closer.
NilCloser = nilCloser(true) NilCloser = nilCloser(true)
// The number of times that a TemporaryDirectory will retry its removal // The number of times that a TemporaryDirectory will retry its removal.
temporaryDirectoryRemoveRetries = 2 temporaryDirectoryRemoveRetries = 2
) )

View file

@ -1294,12 +1294,12 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult {
return apiFuncResult{res, nil, nil, nil} return apiFuncResult{res, nil, nil, nil}
} }
// RuleDiscovery has info for all rules // RuleDiscovery has info for all rules.
type RuleDiscovery struct { type RuleDiscovery struct {
RuleGroups []*RuleGroup `json:"groups"` RuleGroups []*RuleGroup `json:"groups"`
} }
// RuleGroup has info for rules which are part of a group // RuleGroup has info for rules which are part of a group.
type RuleGroup struct { type RuleGroup struct {
Name string `json:"name"` Name string `json:"name"`
File string `json:"file"` File string `json:"file"`

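The JSON shape these structs produce, limited to the fields visible in this hunk (the real RuleGroup carries more fields than shown here):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the API structs above, limited to the fields
// visible in this hunk.
type RuleDiscovery struct {
	RuleGroups []*RuleGroup `json:"groups"`
}

type RuleGroup struct {
	Name string `json:"name"`
	File string `json:"file"`
}

func main() {
	d := RuleDiscovery{RuleGroups: []*RuleGroup{
		{Name: "example", File: "/etc/prometheus/rules.yml"},
	}}
	out, _ := json.MarshalIndent(d, "", "  ")
	fmt.Println(string(out)) // {"groups":[{"name":"example","file":"/etc/prometheus/rules.yml"}]}
}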
View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"description": "a CodeMirror mode for the PromQL language", "description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts", "types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js", "module": "dist/esm/index.js",
@ -29,7 +29,7 @@
}, },
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.48.0-rc.1", "@prometheus-io/lezer-promql": "0.48.0-rc.2",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"description": "lezer-based PromQL grammar", "description": "lezer-based PromQL grammar",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"type": "module", "type": "module",

View file

@ -1,12 +1,12 @@
{ {
"name": "prometheus-io", "name": "prometheus-io",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"lockfileVersion": 2, "lockfileVersion": 2,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "prometheus-io", "name": "prometheus-io",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"workspaces": [ "workspaces": [
"react-app", "react-app",
"module/*" "module/*"
@ -30,10 +30,10 @@
}, },
"module/codemirror-promql": { "module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.48.0-rc.1", "@prometheus-io/lezer-promql": "0.48.0-rc.2",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {
@ -70,7 +70,7 @@
}, },
"module/lezer-promql": { "module/lezer-promql": {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"license": "Apache-2.0", "license": "Apache-2.0",
"devDependencies": { "devDependencies": {
"@lezer/generator": "^1.2.3", "@lezer/generator": "^1.2.3",
@ -20764,7 +20764,7 @@
}, },
"react-app": { "react-app": {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.7.1", "@codemirror/autocomplete": "^6.7.1",
"@codemirror/commands": "^6.2.4", "@codemirror/commands": "^6.2.4",
@ -20782,7 +20782,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.48.0-rc.1", "@prometheus-io/codemirror-promql": "0.48.0-rc.2",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.0", "downshift": "^7.6.0",
@ -23422,7 +23422,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.48.0-rc.1", "@prometheus-io/codemirror-promql": "0.48.0-rc.2",
"@testing-library/react-hooks": "^7.0.2", "@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.13", "@types/enzyme": "^3.10.13",
"@types/flot": "0.0.32", "@types/flot": "0.0.32",
@ -23486,7 +23486,7 @@
"@lezer/common": "^1.0.3", "@lezer/common": "^1.0.3",
"@lezer/highlight": "^1.1.6", "@lezer/highlight": "^1.1.6",
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@prometheus-io/lezer-promql": "0.48.0-rc.1", "@prometheus-io/lezer-promql": "0.48.0-rc.2",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"lru-cache": "^7.18.3", "lru-cache": "^7.18.3",
"nock": "^13.3.1" "nock": "^13.3.1"

View file

@ -28,5 +28,5 @@
"ts-jest": "^29.1.0", "ts-jest": "^29.1.0",
"typescript": "^4.9.5" "typescript": "^4.9.5"
}, },
"version": "0.48.0-rc.1" "version": "0.48.0-rc.2"
} }

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.48.0-rc.1", "version": "0.48.0-rc.2",
"private": true, "private": true,
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.7.1", "@codemirror/autocomplete": "^6.7.1",
@ -19,7 +19,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.48.0-rc.1", "@prometheus-io/codemirror-promql": "0.48.0-rc.2",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.0", "downshift": "^7.6.0",

View file

@ -179,7 +179,7 @@ type LocalStorage interface {
api_v1.TSDBAdminStats api_v1.TSDBAdminStats
} }
// Handler serves various HTTP endpoints of the Prometheus server // Handler serves various HTTP endpoints of the Prometheus server.
type Handler struct { type Handler struct {
logger log.Logger logger log.Logger
@ -215,7 +215,7 @@ type Handler struct {
ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions. ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
} }
// ApplyConfig updates the config field of the Handler struct // ApplyConfig updates the config field of the Handler struct.
func (h *Handler) ApplyConfig(conf *config.Config) error { func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock() h.mtx.Lock()
defer h.mtx.Unlock() defer h.mtx.Unlock()
@ -522,7 +522,7 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
} }
} }
// SetReady sets the ready status of our web Handler // SetReady sets the ready status of our web Handler.
func (h *Handler) SetReady(v bool) { func (h *Handler) SetReady(v bool) {
if v { if v {
h.ready.Store(1) h.ready.Store(1)