Merge remote-tracking branch 'prometheus/main' into upgrade-upstream

Marco Pracucci 2021-11-25 12:01:25 +01:00
commit 9c4410a4b9
No known key found for this signature in database
GPG key ID: 74C1BD403D2DF9B5
26 changed files with 678 additions and 638 deletions

MAINTAINERS.md

@@ -1,7 +1,7 @@
 Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:

 * `cmd`
-  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessica.greben1@gmail.com> / @jessicagreben)
+  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessicagreben@prometheus.io> / @jessicagreben)
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`

cmd/prometheus/main_test.go

@@ -288,12 +288,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 }

 func TestTimeMetrics(t *testing.T) {
-    tmpDir, err := ioutil.TempDir("", "time_metrics_e2e")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(tmpDir))
-    }()
+    tmpDir := t.TempDir()

     reg := prometheus.NewRegistry()
     db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
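The hunk above is the recurring cleanup in this merge: every test that hand-rolled a temporary directory with ioutil.TempDir, an error check, and a deferred os.RemoveAll now uses testing's built-in T.TempDir, which creates the directory and registers its removal via t.Cleanup. A minimal before/after sketch of the pattern (the test bodies here are illustrative, not from this repository):

    package example

    import (
        "io/ioutil" // deprecated since Go 1.16; shown only for the old pattern
        "os"
        "testing"
    )

    // Old pattern: create the directory, check the error, remember to clean up.
    func TestOldPattern(t *testing.T) {
        dir, err := ioutil.TempDir("", "example")
        if err != nil {
            t.Fatal(err)
        }
        defer func() {
            if err := os.RemoveAll(dir); err != nil {
                t.Error(err)
            }
        }()
        _ = dir
    }

    // New pattern: T.TempDir (Go 1.15+) removes the directory automatically
    // when the test and its subtests complete, and reports removal failures.
    func TestNewPattern(t *testing.T) {
        dir := t.TempDir()
        _ = dir
    }

The same substitution repeats in the promtool, remote-storage, and TSDB test files below.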

cmd/prometheus/query_log_test.go

@@ -248,11 +248,7 @@ func (p *queryLogTest) run(t *testing.T) {
         p.setQueryLog(t, "")
     }

-    dir, err := ioutil.TempDir("", "query_log_test")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     params := append([]string{
         "-test.main",

cmd/promtool/backfill_test.go

@@ -15,9 +15,7 @@ package main

 import (
     "context"
-    "io/ioutil"
     "math"
-    "os"
     "sort"
     "testing"
     "time"
@@ -687,13 +685,9 @@ after_eof 1 2
         t.Run(test.Description, func(t *testing.T) {
             t.Logf("Test:%s", test.Description)

-            outputDir, err := ioutil.TempDir("", "myDir")
-            require.NoError(t, err)
-            defer func() {
-                require.NoError(t, os.RemoveAll(outputDir))
-            }()
+            outputDir := t.TempDir()

-            err = backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
+            err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)

             if !test.IsOk {
                 require.Error(t, err, test.Description)

cmd/promtool/rules_test.go

@@ -17,7 +17,6 @@ import (
     "context"
     "io/ioutil"
     "math"
-    "os"
     "path/filepath"
     "testing"
     "time"
@@ -72,11 +71,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
     }
     for _, tt := range testCases {
         t.Run(tt.name, func(t *testing.T) {
-            tmpDir, err := ioutil.TempDir("", "backfilldata")
-            require.NoError(t, err)
-            defer func() {
-                require.NoError(t, os.RemoveAll(tmpDir))
-            }()
+            tmpDir := t.TempDir()
             ctx := context.Background()

             // Execute the test more than once to simulate running the rule importer twice with the same data.
@@ -219,11 +214,7 @@ func createMultiRuleTestFiles(path string) error {
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
 // received from Prometheus Query API, including the __name__ label.
 func TestBackfillLabels(t *testing.T) {
-    tmpDir, err := ioutil.TempDir("", "backfilldata")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(tmpDir))
-    }()
+    tmpDir := t.TempDir()
     ctx := context.Background()

     start := time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)

docs/configuration/configuration.md

@@ -1023,8 +1023,8 @@ address defaults to the `host_ip` attribute of the hypervisor.
 The following meta labels are available on targets during [relabeling](#relabel_config):

 * `__meta_openstack_hypervisor_host_ip`: the hypervisor node's IP address.
+* `__meta_openstack_hypervisor_hostname`: the hypervisor node's name.
 * `__meta_openstack_hypervisor_id`: the hypervisor node's ID.
-* `__meta_openstack_hypervisor_name`: the hypervisor node's name.
 * `__meta_openstack_hypervisor_state`: the hypervisor node's state.
 * `__meta_openstack_hypervisor_status`: the hypervisor node's status.
 * `__meta_openstack_hypervisor_type`: the hypervisor node's type.

documentation/examples/prometheus-kubernetes.yml

@@ -185,10 +185,10 @@ scrape_configs:
         regex: __meta_kubernetes_service_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
         action: replace
-        target_label: kubernetes_namespace
+        target_label: namespace
      - source_labels: [__meta_kubernetes_service_name]
         action: replace
-        target_label: kubernetes_name
+        target_label: service

 # Example scrape config for probing services via the Blackbox Exporter.
 #
@@ -217,9 +217,9 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_service_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_service_name]
-        target_label: kubernetes_name
+        target_label: service

 # Example scrape config for probing ingresses via the Blackbox Exporter.
 #
@@ -255,9 +255,9 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_ingress_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_ingress_name]
-        target_label: kubernetes_name
+        target_label: ingress

 # Example scrape config for pods
 #
@@ -294,7 +294,7 @@ scrape_configs:
         regex: __meta_kubernetes_pod_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
         action: replace
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_pod_name]
         action: replace
-        target_label: kubernetes_pod_name
+        target_label: pod

documentation/prometheus-mixin/alerts.libsonnet

@@ -391,7 +391,7 @@
             and
             (
               count by (%(prometheusHAGroupLabels)s) (
-                changes(process_start_time_seconds{%(prometheusSelector)s}[30m]) > 1
+                changes(process_start_time_seconds{%(prometheusSelector)s}[1h]) > 1
               )
               /
               count by (%(prometheusHAGroupLabels)s) (
@@ -418,7 +418,7 @@
           },
           annotations: {
             summary: 'More than half of the Prometheus instances within the same HA group are crashlooping.',
-            description: '{{ $value | humanizePercentage }} of Prometheus instances within the %(prometheusHAGroupName)s HA group have had at least 5 total restarts or 2 unclean restarts in the last 30m.' % $._config,
+            description: '{{ $value | humanizePercentage }} of Prometheus instances within the %(prometheusHAGroupName)s HA group have had at least 5 total restarts in the last 30m or 2 unclean restarts in the last 1h.' % $._config,
           },
         },
       ],

go.mod

@@ -9,13 +9,13 @@ require (
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a
-	github.com/aws/aws-sdk-go v1.42.6
+	github.com/aws/aws-sdk-go v1.42.10
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/containerd/containerd v1.5.7 // indirect
 	github.com/dennwc/varint v1.0.0
 	github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
 	github.com/digitalocean/godo v1.71.0
-	github.com/docker/docker v20.10.10+incompatible
+	github.com/docker/docker v20.10.11+incompatible
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/edsrzf/mmap-go v1.0.0
 	github.com/envoyproxy/go-control-plane v0.10.1
@@ -71,9 +71,9 @@ require (
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
-	k8s.io/api v0.22.3
-	k8s.io/apimachinery v0.22.3
-	k8s.io/client-go v0.22.3
+	k8s.io/api v0.22.4
+	k8s.io/apimachinery v0.22.4
+	k8s.io/client-go v0.22.4
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.20.0
 )

go.sum

@@ -187,8 +187,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.42.6 h1:CiJmv8Fdc7wLZhfWy1ZA9TNoOQrFtUC0mhpgyJTaKOs=
-github.com/aws/aws-sdk-go v1.42.6/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.42.10 h1:PW9G/hnsuKttbFtOcgNKD0vQrp4yfNrtACA+X0p9mjM=
+github.com/aws/aws-sdk-go v1.42.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
 github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
@@ -381,8 +381,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM=
-github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo=
+github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@@ -2006,14 +2006,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
 k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
 k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/api v0.22.3 h1:wOoES2GoSkUsdped2RB4zYypPqWtvprGoKCENTOOjP4=
-k8s.io/api v0.22.3/go.mod h1:azgiXFiXqiWyLCfI62/eYBOu19rj2LKmIhFPP4+33fs=
+k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
+k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
 k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
 k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.22.3 h1:mrvBG5CZnEfwgpVqWcrRKvdsYECTrhAR6cApAgdsflk=
-k8s.io/apimachinery v0.22.3/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
+k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
 k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
 k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -2021,8 +2021,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
 k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
 k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
 k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.22.3 h1:6onkOSc+YNdwq5zXE0wFXicq64rrym+mXwHu/CPVGO4=
-k8s.io/client-go v0.22.3/go.mod h1:ElDjYf8gvZsKDYexmsmnMQ0DYO8W9RwBjfQ1PI53yow=
+k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
+k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
 k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
 k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
 k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
@@ -2034,8 +2034,8 @@ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=

promql/value.go

@@ -77,6 +77,9 @@ func (s Series) String() string {
 }

 // Point represents a single data point for a given timestamp.
+//
+// Note that Point's JSON marshaling is done in an optimized fashion in
+// web/api/v1/api.go. Therefore, no MarshalJSON method is provided here.
 type Point struct {
     T int64
     V float64
@@ -87,12 +90,6 @@ func (p Point) String() string {
     return fmt.Sprintf("%v @[%v]", v, p.T)
 }

-// MarshalJSON implements json.Marshaler.
-func (p Point) MarshalJSON() ([]byte, error) {
-    v := strconv.FormatFloat(p.V, 'f', -1, 64)
-    return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
-}
-
 // Sample is a single sample belonging to a metric.
 type Sample struct {
     Point
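The removed method encoded a point as a two-element array of [unix_seconds, "value"], with the value rendered as a string so floats round-trip exactly; per the new comment, the same wire format is now produced by the specialized encoder in web/api/v1/api.go. For reference, a standalone sketch reproducing the removed behavior:

    package example

    import (
        "encoding/json"
        "strconv"
    )

    // Point mirrors promql.Point: T is milliseconds since the epoch.
    type Point struct {
        T int64
        V float64
    }

    // marshalPoint reproduces the removed Point.MarshalJSON: the timestamp is
    // converted to (fractional) seconds and the value becomes a string.
    func marshalPoint(p Point) ([]byte, error) {
        v := strconv.FormatFloat(p.V, 'f', -1, 64)
        return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
    }

For example, marshalPoint(Point{T: 1500, V: 0.1}) yields [1.5,"0.1"].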

storage/remote/queue_manager_test.go

@@ -75,11 +75,7 @@ func TestSampleDelivery(t *testing.T) {
     // batch timeout case.
     n := 3

-    dir, err := ioutil.TempDir("", "TestSampleDelivery")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
     defer s.Close()
@@ -154,9 +150,7 @@ func TestSampleDelivery(t *testing.T) {
 func TestMetadataDelivery(t *testing.T) {
     c := NewTestWriteClient()

-    dir, err := ioutil.TempDir("", "TestMetadataDelivery")
-    require.NoError(t, err)
-    defer os.RemoveAll(dir)
+    dir := t.TempDir()

     cfg := config.DefaultQueueConfig
     mcfg := config.DefaultMetadataConfig
@@ -198,11 +192,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
     cfg.MaxShards = 1
     cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)

-    dir, err := ioutil.TempDir("", "TestSampleDeliveryTimeout")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     metrics := newQueueManagerMetrics(nil, "", "")
     m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
@@ -241,11 +231,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
     c := NewTestWriteClient()
     c.expectSamples(samples, series)

-    dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     cfg := config.DefaultQueueConfig
     mcfg := config.DefaultMetadataConfig
@@ -265,11 +251,7 @@ func TestShutdown(t *testing.T) {
     deadline := 1 * time.Second
     c := NewTestBlockedWriteClient()

-    dir, err := ioutil.TempDir("", "TestShutdown")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     cfg := config.DefaultQueueConfig
     mcfg := config.DefaultMetadataConfig
@@ -308,11 +290,7 @@ func TestSeriesReset(t *testing.T) {
     numSegments := 4
     numSeries := 25

-    dir, err := ioutil.TempDir("", "TestSeriesReset")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     cfg := config.DefaultQueueConfig
     mcfg := config.DefaultMetadataConfig
@@ -343,11 +321,7 @@ func TestReshard(t *testing.T) {
     mcfg := config.DefaultMetadataConfig
     cfg.MaxShards = 1

-    dir, err := ioutil.TempDir("", "TestReshard")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     metrics := newQueueManagerMetrics(nil, "", "")
     m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
@@ -740,9 +714,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
     cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
     cfg.MaxShards = 1

-    dir, err := ioutil.TempDir("", "BenchmarkSampleDelivery")
-    require.NoError(b, err)
-    defer os.RemoveAll(dir)
+    dir := b.TempDir()

     metrics := newQueueManagerMetrics(nil, "", "")
     m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
@@ -865,11 +837,7 @@ func TestCalculateDesiredShards(t *testing.T) {
     cfg := config.DefaultQueueConfig
     mcfg := config.DefaultMetadataConfig

-    dir, err := ioutil.TempDir("", "TestCalculateDesiredShards")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     metrics := newQueueManagerMetrics(nil, "", "")
     samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)

storage/remote/read_test.go

@@ -15,9 +15,7 @@ package remote

 import (
     "context"
-    "io/ioutil"
     "net/url"
-    "os"
     "sort"
     "testing"
@@ -33,9 +31,7 @@ import (
 )

 func TestNoDuplicateReadConfigs(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestNoDuplicateReadConfigs")
-    require.NoError(t, err)
-    defer os.RemoveAll(dir)
+    dir := t.TempDir()

     cfg1 := config.RemoteReadConfig{
         Name: "write-1",

storage/remote/storage_test.go

@@ -14,9 +14,7 @@
 package remote

 import (
-    "io/ioutil"
     "net/url"
-    "os"
     "testing"

     common_config "github.com/prometheus/common/config"
@@ -26,9 +24,7 @@ import (
 )

 func TestStorageLifecycle(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestStorageLifecycle")
-    require.NoError(t, err)
-    defer os.RemoveAll(dir)
+    dir := t.TempDir()

     s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
     conf := &config.Config{
@@ -60,14 +56,12 @@ func TestStorageLifecycle(t *testing.T) {
     // make sure remote write has a queue.
     require.Equal(t, 1, len(s.queryables))

-    err = s.Close()
+    err := s.Close()
     require.NoError(t, err)
 }

 func TestUpdateRemoteReadConfigs(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestUpdateRemoteReadConfigs")
-    require.NoError(t, err)
-    defer os.RemoveAll(dir)
+    dir := t.TempDir()

     s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
@@ -83,6 +77,6 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
     require.NoError(t, s.ApplyConfig(conf))
     require.Equal(t, 1, len(s.queryables))

-    err = s.Close()
+    err := s.Close()
     require.NoError(t, err)
 }

storage/remote/write_test.go

@@ -14,9 +14,7 @@
 package remote

 import (
-    "io/ioutil"
     "net/url"
-    "os"
     "testing"
     "time"
@@ -44,11 +42,7 @@ func testRemoteWriteConfig() *config.RemoteWriteConfig {
 }

 func TestNoDuplicateWriteConfigs(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestNoDuplicateWriteConfigs")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     cfg1 := config.RemoteWriteConfig{
         Name: "write-1",
@@ -132,11 +126,7 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
 }

 func TestRestartOnNameChange(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestRestartOnNameChange")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     cfg := testRemoteWriteConfig()
@@ -166,11 +156,7 @@ func TestRestartOnNameChange(t *testing.T) {
 }

 func TestUpdateWithRegisterer(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestRestartWithRegisterer")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil)
     c1 := &config.RemoteWriteConfig{
@@ -205,16 +191,12 @@ func TestUpdateWithRegisterer(t *testing.T) {
         require.Equal(t, 10, queue.cfg.MaxShards)
     }

-    err = s.Close()
+    err := s.Close()
     require.NoError(t, err)
 }

 func TestWriteStorageLifecycle(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestWriteStorageLifecycle")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
     conf := &config.Config{
@@ -226,16 +208,12 @@ func TestWriteStorageLifecycle(t *testing.T) {
     require.NoError(t, s.ApplyConfig(conf))
     require.Equal(t, 1, len(s.queues))

-    err = s.Close()
+    err := s.Close()
     require.NoError(t, err)
 }

 func TestUpdateExternalLabels(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestUpdateExternalLabels")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil)
@@ -264,11 +242,7 @@ func TestUpdateExternalLabels(t *testing.T) {
 }

 func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestWriteStorageApplyConfigsIdempotent")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
@@ -305,11 +279,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
 }

 func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
-    dir, err := ioutil.TempDir("", "TestWriteStorageApplyConfigsPartialUpdate")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.RemoveAll(dir))
-    }()
+    dir := t.TempDir()

     s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
@@ -383,7 +353,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
     secondClient := s.queues[hashes[1]].client()
     // Update c1.
     c1.HTTPClientConfig.BearerToken = "bar"
-    err = s.ApplyConfig(conf)
+    err := s.ApplyConfig(conf)
     require.NoError(t, err)
     require.Equal(t, 3, len(s.queues))

tsdb/db_test.go

@@ -1013,6 +1013,92 @@ func TestWALSegmentSizeOptions(t *testing.T) {
     }
 }

+// https://github.com/prometheus/prometheus/issues/9846
+// https://github.com/prometheus/prometheus/issues/9859
+func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) {
+    const (
+        numRuns                        = 1
+        numSamplesBeforeSeriesCreation = 1000
+    )
+
+    // We test both with few and many samples appended after series creation. If samples are < 120 then there's no
+    // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
+    for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
+        for run := 1; run <= numRuns; run++ {
+            t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
+                testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
+            })
+        }
+    }
+}
+
+func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
+    const numSeries = 1000
+
+    db := openTestDB(t, nil, nil)
+    db.DisableCompactions()
+
+    for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
+        // Log samples before the series is logged to the WAL.
+        var enc record.Encoder
+        var samples []record.RefSample
+        for ts := 0; ts < numSamplesBeforeSeriesCreation; ts++ {
+            samples = append(samples, record.RefSample{
+                Ref: chunks.HeadSeriesRef(uint64(seriesRef)),
+                T:   int64(ts),
+                V:   float64(ts),
+            })
+        }
+        err := db.Head().wal.Log(enc.Samples(samples, nil))
+        require.NoError(t, err)
+
+        // Add samples via appender so that they're logged after the series in the WAL.
+        app := db.Appender(context.Background())
+        lbls := labels.FromStrings("series_id", strconv.Itoa(seriesRef))
+
+        for ts := numSamplesBeforeSeriesCreation; ts < numSamplesBeforeSeriesCreation+numSamplesAfterSeriesCreation; ts++ {
+            _, err := app.Append(0, lbls, int64(ts), float64(ts))
+            require.NoError(t, err)
+        }
+
+        require.NoError(t, app.Commit())
+    }
+
+    require.NoError(t, db.Close())
+
+    // Reopen the DB, replaying the WAL.
+    reopenDB, err := Open(db.Dir(), log.NewLogfmtLogger(os.Stderr), nil, nil, nil)
+    require.NoError(t, err)
+    t.Cleanup(func() {
+        require.NoError(t, reopenDB.Close())
+    })
+
+    // Query back chunks for all series.
+    q, err := reopenDB.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64)
+    require.NoError(t, err)
+
+    set := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+"))
+    actualSeries := 0
+
+    for set.Next() {
+        actualSeries++
+        actualChunks := 0
+
+        chunksIt := set.At().Iterator()
+        for chunksIt.Next() {
+            actualChunks++
+        }
+        require.NoError(t, chunksIt.Err())
+
+        // We expect 1 chunk every 120 samples after series creation.
+        require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String())
+    }
+
+    require.NoError(t, set.Err())
+    require.Equal(t, numSeries, actualSeries)
+}
+
 func TestTombstoneClean(t *testing.T) {
     numSamples := int64(10)

tsdb/head_wal.go

@@ -51,13 +51,10 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
     var mmapOverlappingChunks uint64

     // Start workers that each process samples for a partition of the series ID space.
-    // They are connected through a ring of channels which ensures that all sample batches
-    // read from the WAL are processed in order.
     var (
         wg             sync.WaitGroup
         n              = runtime.GOMAXPROCS(0)
-        inputs         = make([]chan []record.RefSample, n)
-        outputs        = make([]chan []record.RefSample, n)
+        processors     = make([]walSubsetProcessor, n)
         exemplarsInput chan record.RefExemplar

         dec            record.Decoder
@@ -92,9 +89,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
             _, ok := err.(*wal.CorruptionErr)
             if ok || seriesCreationErr != nil {
                 for i := 0; i < n; i++ {
-                    close(inputs[i])
-                    for range outputs[i] {
-                    }
+                    processors[i].closeAndDrain()
                 }
                 close(exemplarsInput)
                 wg.Wait()
@@ -103,14 +98,13 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
     wg.Add(n)
     for i := 0; i < n; i++ {
-        outputs[i] = make(chan []record.RefSample, 300)
-        inputs[i] = make(chan []record.RefSample, 300)
+        processors[i].setup()

-        go func(input <-chan []record.RefSample, output chan<- []record.RefSample) {
-            unknown := h.processWALSamples(h.minValidTime.Load(), input, output)
+        go func(wp *walSubsetProcessor) {
+            unknown := wp.processWALSamples(h)
             unknownRefs.Add(unknown)
             wg.Done()
-        }(inputs[i], outputs[i])
+        }(&processors[i])
     }

     wg.Add(1)
@@ -212,11 +206,20 @@ Outer:
                     h.lastSeriesID.Store(uint64(walSeries.Ref))
                 }

+                idx := uint64(mSeries.ref) % uint64(n)
+                // It is possible that some old sample is being processed in processWALSamples that
+                // could cause race below. So we wait for the goroutine to empty input the buffer and finish
+                // processing all old samples after emptying the buffer.
+                processors[idx].waitUntilIdle()
+                // Lock the subset so we can modify the series object
+                processors[idx].mx.Lock()
+
                 mmc := mmappedChunks[walSeries.Ref]

                 if created {
                     // This is the first WAL series record for this series.
-                    h.setMMappedChunks(mSeries, mmc)
+                    h.resetSeriesWithMMappedChunks(mSeries, mmc)
+                    processors[idx].mx.Unlock()
                     continue
                 }
@@ -226,23 +229,6 @@ Outer:
                 multiRef[walSeries.Ref] = mSeries.ref

-                idx := uint64(mSeries.ref) % uint64(n)
-                // It is possible that some old sample is being processed in processWALSamples that
-                // could cause race below. So we wait for the goroutine to empty input the buffer and finish
-                // processing all old samples after emptying the buffer.
-                select {
-                case <-outputs[idx]: // allow output side to drain to avoid deadlock
-                default:
-                }
-                inputs[idx] <- []record.RefSample{}
-                for len(inputs[idx]) != 0 {
-                    time.Sleep(1 * time.Millisecond)
-                    select {
-                    case <-outputs[idx]: // allow output side to drain to avoid deadlock
-                    default:
-                    }
-                }
-
                 // Checking if the new m-mapped chunks overlap with the already existing ones.
                 if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 {
                     if overlapsClosedInterval(
@@ -266,12 +252,9 @@ Outer:
                 }

                 // Replacing m-mapped chunks with the new ones (could be empty).
-                h.setMMappedChunks(mSeries, mmc)
-
-                // Any samples replayed till now would already be compacted. Resetting the head chunk.
-                mSeries.nextAt = 0
-                mSeries.headChunk = nil
-                mSeries.app = nil
+                h.resetSeriesWithMMappedChunks(mSeries, mmc)
+                processors[idx].mx.Unlock()
             }
             //nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
             seriesPool.Put(v)
@@ -287,12 +270,7 @@ Outer:
                 m = len(samples)
             }
             for i := 0; i < n; i++ {
-                var buf []record.RefSample
-                select {
-                case buf = <-outputs[i]:
-                default:
-                }
-                shards[i] = buf[:0]
+                shards[i] = processors[i].reuseBuf()
             }
             for _, sam := range samples[:m] {
                 if r, ok := multiRef[sam.Ref]; ok {
@@ -302,7 +280,7 @@ Outer:
                 shards[mod] = append(shards[mod], sam)
             }
             for i := 0; i < n; i++ {
-                inputs[i] <- shards[i]
+                processors[i].input <- shards[i]
             }
             samples = samples[m:]
         }
@@ -346,9 +324,7 @@ Outer:
     // Signal termination to each worker and wait for it to close its output channel.
     for i := 0; i < n; i++ {
-        close(inputs[i])
-        for range outputs[i] {
-        }
+        processors[i].closeAndDrain()
     }
     close(exemplarsInput)
     wg.Wait()
@@ -366,7 +342,8 @@ Outer:
     return nil
 }

-func (h *Head) setMMappedChunks(mSeries *memSeries, mmc []*mmappedChunk) {
+// resetSeriesWithMMappedChunks is only used during the WAL replay.
+func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc []*mmappedChunk) {
     h.metrics.chunksCreated.Add(float64(len(mmc)))
     h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks)))
     h.metrics.chunks.Add(float64(len(mmc) - len(mSeries.mmappedChunks)))
@@ -378,20 +355,51 @@ func (h *Head) setMMappedChunks(mSeries *memSeries, mmc []*mmappedChunk) {
         mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
         h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
     }
+    // Any samples replayed till now would already be compacted. Resetting the head chunk.
+    mSeries.nextAt = 0
+    mSeries.headChunk = nil
+    mSeries.app = nil
+}
+
+type walSubsetProcessor struct {
+    mx     sync.Mutex // Take this lock while modifying series in the subset.
+    input  chan []record.RefSample
+    output chan []record.RefSample
+}
+
+func (wp *walSubsetProcessor) setup() {
+    wp.output = make(chan []record.RefSample, 300)
+    wp.input = make(chan []record.RefSample, 300)
+}
+
+func (wp *walSubsetProcessor) closeAndDrain() {
+    close(wp.input)
+    for range wp.output {
+    }
+}
+
+// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
+func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
+    select {
+    case buf := <-wp.output:
+        return buf[:0]
+    default:
+    }
+    return nil
 }

 // processWALSamples adds the samples it receives to the head and passes
 // the buffer received to an output channel for reuse.
 // Samples before the minValidTime timestamp are discarded.
-func (h *Head) processWALSamples(
-    minValidTime int64,
-    input <-chan []record.RefSample, output chan<- []record.RefSample,
-) (unknownRefs uint64) {
-    defer close(output)
+func (wp *walSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) {
+    defer close(wp.output)
+
+    minValidTime := h.minValidTime.Load()
     mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
-    for samples := range input {
+    for samples := range wp.input {
+        wp.mx.Lock()
         for _, s := range samples {
             if s.T < minValidTime {
                 continue
@@ -415,13 +423,29 @@ func (h *Head) processWALSamples(
                 mint = s.T
             }
         }
-        output <- samples
+        wp.mx.Unlock()
+        wp.output <- samples
     }
     h.updateMinMaxTime(mint, maxt)
     return unknownRefs
 }

+func (wp *walSubsetProcessor) waitUntilIdle() {
+    select {
+    case <-wp.output: // Allow output side to drain to avoid deadlock.
+    default:
+    }
+    wp.input <- []record.RefSample{}
+    for len(wp.input) != 0 {
+        time.Sleep(1 * time.Millisecond)
+        select {
+        case <-wp.output: // Allow output side to drain to avoid deadlock.
+        default:
+        }
+    }
+}
+
 const (
     chunkSnapshotRecordTypeSeries     uint8 = 1
     chunkSnapshotRecordTypeTombstones uint8 = 2
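The hunks above are the substance of the WAL-replay race fix exercised by the new TestWALReplayRaceOnSamplesLoggedBeforeSeries: instead of juggling bare input/output channel slices, each worker now owns a walSubsetProcessor, and the new mx mutex makes the ordering explicit. When a series record arrives, the replay loop first calls waitUntilIdle so the shard's worker has drained its queued sample batches, then holds the shard lock while it resets the series, so processWALSamples can never append to a series that is being reset underneath it. A stripped-down model of that locking discipline (illustrative names and types, not the tsdb code):

    package example

    import (
        "sync"
        "time"
    )

    type shard struct {
        mx    sync.Mutex // held while touching any series owned by this shard
        input chan []int
    }

    // worker mirrors processWALSamples: it locks the shard for each batch.
    func (s *shard) worker(series map[int][]int, wg *sync.WaitGroup) {
        defer wg.Done()
        for batch := range s.input {
            s.mx.Lock()
            for _, v := range batch {
                series[v] = append(series[v], v)
            }
            s.mx.Unlock()
        }
    }

    // resetSeries mirrors the series-record path: drain, then lock, then mutate.
    func (s *shard) resetSeries(series map[int][]int, id int) {
        for len(s.input) != 0 { // crude stand-in for waitUntilIdle
            time.Sleep(time.Millisecond)
        }
        s.mx.Lock()
        series[id] = nil
        s.mx.Unlock()
    }

The earlier channel-draining dance survives inside waitUntilIdle, but the mutex is what closes the window in which a half-reset series could receive an append.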

tsdb/index/index.go

@@ -18,6 +18,7 @@ import (
     "bytes"
     "context"
     "encoding/binary"
+    "fmt"
     "hash"
     "hash/crc32"
     "io"
@@ -1853,7 +1854,13 @@ func (dec *Decoder) Postings(b []byte) (int, Postings, error) {
     d := encoding.Decbuf{B: b}
     n := d.Be32int()
     l := d.Get()
-    return n, newBigEndianPostings(l), d.Err()
+    if d.Err() != nil {
+        return 0, nil, d.Err()
+    }
+    if len(l) != 4*n {
+        return 0, nil, fmt.Errorf("unexpected postings length, should be %d bytes for %d postings, got %d bytes", 4*n, n, len(l))
+    }
+    return n, newBigEndianPostings(l), nil
 }

 // LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series.
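Context for this validation: in the TSDB index, a postings list is a 4-byte big-endian count n followed by n 4-byte big-endian series references, so a well-formed payload is exactly 4*n bytes after the count. Previously a corrupt or truncated buffer could slip through the decoder and only fail later inside the postings iterator; the check rejects it up front. A self-contained sketch of the same check, independent of the tsdb encoding helpers:

    package example

    import (
        "encoding/binary"
        "fmt"
    )

    // decodePostings expects a 4-byte big-endian count n, then n 4-byte
    // big-endian series references.
    func decodePostings(b []byte) ([]uint32, error) {
        if len(b) < 4 {
            return nil, fmt.Errorf("postings too short: %d bytes", len(b))
        }
        n := int(binary.BigEndian.Uint32(b[:4]))
        rest := b[4:]
        if len(rest) != 4*n {
            return nil, fmt.Errorf("unexpected postings length, should be %d bytes for %d postings, got %d bytes", 4*n, n, len(rest))
        }
        refs := make([]uint32, 0, n)
        for i := 0; i < n; i++ {
            refs = append(refs, binary.BigEndian.Uint32(rest[4*i:]))
        }
        return refs, nil
    }

The new TestDecoder_Postings_WrongInput below feeds an arbitrary string through the real decoder and asserts that it now returns an error.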

tsdb/index/index_test.go

@@ -674,3 +674,8 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
         })
     }
 }
+
+func TestDecoder_Postings_WrongInput(t *testing.T) {
+    _, _, err := (&Decoder{}).Postings([]byte("the cake is a lie"))
+    require.Error(t, err)
+}

web/api/v1/api.go

@@ -1155,15 +1155,15 @@ type RuleGroup struct {
     // In order to preserve rule ordering, while exposing type (alerting or recording)
     // specific properties, both alerting and recording rules are exposed in the
     // same array.
-    Rules          []rule    `json:"rules"`
+    Rules          []Rule    `json:"rules"`
     Interval       float64   `json:"interval"`
     EvaluationTime float64   `json:"evaluationTime"`
     LastEvaluation time.Time `json:"lastEvaluation"`
 }

-type rule interface{}
+type Rule interface{}

-type alertingRule struct {
+type AlertingRule struct {
     // State can be "pending", "firing", "inactive".
     State       string           `json:"state"`
     Name        string           `json:"name"`
@@ -1180,7 +1180,7 @@ type alertingRule struct {
     Type string `json:"type"`
 }

-type recordingRule struct {
+type RecordingRule struct {
     Name   string        `json:"name"`
     Query  string        `json:"query"`
     Labels labels.Labels `json:"labels,omitempty"`
@@ -1209,12 +1209,12 @@ func (api *API) rules(r *http.Request) apiFuncResult {
             Name:           grp.Name(),
             File:           grp.File(),
             Interval:       grp.Interval().Seconds(),
-            Rules:          []rule{},
+            Rules:          []Rule{},
             EvaluationTime: grp.GetEvaluationTime().Seconds(),
             LastEvaluation: grp.GetLastEvaluation(),
         }
         for _, r := range grp.Rules() {
-            var enrichedRule rule
+            var enrichedRule Rule

             lastError := ""
             if r.LastError() != nil {
@@ -1225,7 +1225,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
                 if !returnAlerts {
                     break
                 }
-                enrichedRule = alertingRule{
+                enrichedRule = AlertingRule{
                     State:       rule.State().String(),
                     Name:        rule.Name(),
                     Query:       rule.Query().String(),
@@ -1243,7 +1243,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
                 if !returnRecording {
                     break
                 }
-                enrichedRule = recordingRule{
+                enrichedRule = RecordingRule{
                     Name:   rule.Name(),
                     Query:  rule.Query().String(),
                     Labels: rule.Labels(),

web/api/v1/api_test.go

@@ -1472,8 +1472,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                     Name:     "grp",
                     File:     "/path/to/file",
                     Interval: 1,
-                    Rules: []rule{
-                        alertingRule{
+                    Rules: []Rule{
+                        AlertingRule{
                             State:       "inactive",
                             Name:        "test_metric3",
                             Query:       "absent(test_metric3) != 1",
@@ -1484,7 +1484,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                             Health:      "unknown",
                             Type:        "alerting",
                         },
-                        alertingRule{
+                        AlertingRule{
                             State:       "inactive",
                             Name:        "test_metric4",
                             Query:       "up == 1",
@@ -1495,7 +1495,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                             Health:      "unknown",
                             Type:        "alerting",
                         },
-                        recordingRule{
+                        RecordingRule{
                             Name:   "recording-rule-1",
                             Query:  "vector(1)",
                             Labels: labels.Labels{},
@@ -1518,8 +1518,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                     Name:     "grp",
                     File:     "/path/to/file",
                     Interval: 1,
-                    Rules: []rule{
-                        alertingRule{
+                    Rules: []Rule{
+                        AlertingRule{
                             State:       "inactive",
                             Name:        "test_metric3",
                             Query:       "absent(test_metric3) != 1",
@@ -1530,7 +1530,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                             Health:      "unknown",
                             Type:        "alerting",
                         },
-                        alertingRule{
+                        AlertingRule{
                             State:       "inactive",
                             Name:        "test_metric4",
                             Query:       "up == 1",
@@ -1557,8 +1557,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
                     Name:     "grp",
                     File:     "/path/to/file",
                     Interval: 1,
-                    Rules: []rule{
-                        recordingRule{
+                    Rules: []Rule{
+                        RecordingRule{
                             Name:   "recording-rule-1",
                             Query:  "vector(1)",
                             Labels: labels.Labels{},

web/ui/module/codemirror-promql/package.json

@@ -39,26 +39,26 @@
     "@codemirror/highlight": "^0.19.6",
     "@codemirror/language": "^0.19.5",
     "@codemirror/lint": "^0.19.3",
-    "@codemirror/state": "^0.19.5",
-    "@codemirror/view": "^0.19.19",
+    "@codemirror/state": "^0.19.6",
+    "@codemirror/view": "^0.19.20",
     "@lezer/common": "^0.15.8",
     "@lezer/generator": "^0.15.2",
     "@types/chai": "^4.2.22",
     "@types/lru-cache": "^5.1.0",
     "@types/mocha": "^9.0.0",
-    "@types/node": "^16.11.7",
+    "@types/node": "^16.11.9",
     "@typescript-eslint/eslint-plugin": "^5.3.1",
     "@typescript-eslint/parser": "^5.3.1",
     "chai": "^4.2.0",
     "codecov": "^3.8.1",
-    "eslint": "^8.2.0",
+    "eslint": "^8.3.0",
     "eslint-config-prettier": "^8.3.0",
     "eslint-plugin-flowtype": "^8.0.3",
     "eslint-plugin-import": "^2.25.3",
     "eslint-plugin-prettier": "^4.0.0",
     "isomorphic-fetch": "^3.0.0",
     "mocha": "^8.4.0",
-    "nock": "^13.0.11",
+    "nock": "^13.2.1",
     "nyc": "^15.1.0",
     "prettier": "^2.4.1",
     "ts-loader": "^7.0.4",
@@ -71,8 +71,8 @@
     "@codemirror/highlight": "^0.19.6",
     "@codemirror/language": "^0.19.5",
     "@codemirror/lint": "^0.19.3",
-    "@codemirror/state": "^0.19.5",
-    "@codemirror/view": "^0.19.19",
+    "@codemirror/state": "^0.19.6",
+    "@codemirror/view": "^0.19.20",
     "@lezer/common": "^0.15.8"
   },
   "prettier": {

web/ui/package-lock.json (generated)

File diff suppressed because it is too large.

web/ui/react-app/package.json

@@ -12,15 +12,15 @@
     "@codemirror/language": "^0.19.5",
     "@codemirror/lint": "^0.19.3",
     "@codemirror/matchbrackets": "^0.19.3",
-    "@codemirror/search": "^0.19.2",
-    "@codemirror/state": "^0.19.5",
-    "@codemirror/view": "^0.19.19",
+    "@codemirror/search": "^0.19.3",
+    "@codemirror/state": "^0.19.6",
+    "@codemirror/view": "^0.19.20",
     "@forevolve/bootstrap-dark": "^1.0.0",
     "@fortawesome/fontawesome-svg-core": "^1.2.14",
     "@fortawesome/free-solid-svg-icons": "^5.7.1",
-    "@fortawesome/react-fontawesome": "^0.1.4",
+    "@fortawesome/react-fontawesome": "^0.1.16",
     "@nexucis/fuzzy": "^0.3.0",
-    "bootstrap": "^5.1.3",
+    "bootstrap": "^4.6.1",
     "codemirror-promql": "0.18.0",
     "css.escape": "^1.5.1",
     "downshift": "^6.1.7",
@@ -67,10 +67,10 @@
     "@testing-library/react-hooks": "^7.0.1",
     "@types/enzyme": "^3.10.10",
     "@types/flot": "0.0.32",
-    "@types/jest": "^27.0.2",
-    "@types/jquery": "^3.5.8",
-    "@types/node": "^16.11.7",
-    "@types/react": "^17.0.35",
+    "@types/jest": "^27.0.3",
+    "@types/jquery": "^3.5.9",
+    "@types/node": "^16.11.9",
+    "@types/react": "^17.0.36",
     "@types/react-copy-to-clipboard": "^5.0.2",
     "@types/react-dom": "^17.0.11",
     "@types/react-resize-detector": "^6.1.0",
@@ -88,7 +88,7 @@
     "mutationobserver-shim": "^0.3.7",
     "prettier": "^2.4.1",
     "react-scripts": "4.0.3",
-    "sinon": "^11.1.2",
+    "sinon": "^12.0.1",
     "typescript": "^4.5.2"
   },
   "proxy": "http://localhost:9090",

web/ui/react-app/src/pages/graph/CMTheme.tsx

@@ -2,7 +2,7 @@ import { HighlightStyle, tags } from '@codemirror/highlight';
 import { EditorView } from '@codemirror/view';

 export const baseTheme = EditorView.theme({
-  '&': {
+  '&.cm-editor': {
     '&.cm-focused': {
       outline: 'none',
       outline_fallback: 'none',

web/web.go

@@ -594,7 +594,7 @@ func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig stri
         ReadTimeout: h.options.ReadTimeout,
     }

-    errCh := make(chan error)
+    errCh := make(chan error, 1)
     go func() {
         errCh <- toolkit_web.Serve(listener, httpSrv, webConfig, h.logger)
     }()
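A closing note on this last hunk: with an unbuffered channel, if Run returns for another reason (for example, context cancellation) before toolkit_web.Serve returns, the goroutine would block forever on the send into errCh, leaking the goroutine. A capacity of 1 guarantees the send always completes. A minimal sketch of the pattern (simplified; not the actual web.Handler code):

    package example

    import "context"

    // run starts serve in a goroutine and waits for an error or cancellation.
    // errCh has capacity 1 so the goroutine can finish its send and exit even
    // if run has already returned via the ctx branch.
    func run(ctx context.Context, serve func() error) error {
        errCh := make(chan error, 1)
        go func() {
            errCh <- serve()
        }()

        select {
        case err := <-errCh:
            return err
        case <-ctx.Done():
            return ctx.Err()
        }
    }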