mirror of https://github.com/prometheus/prometheus.git
commit 69155c6ba1
@@ -12,6 +12,7 @@
 // limitations under the License.
 
 // The main package for the Prometheus server executable.
+// nolint:revive // Many unused function arguments in this file by design.
 package main
 
 import (
@@ -72,9 +72,11 @@ Loop:
	if !startedOk {
		t.Fatal("prometheus didn't start in the specified timeout")
	}
-	if err := prom.Process.Kill(); err == nil {
+	switch err := prom.Process.Kill(); {
+	case err == nil:
		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
-	} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
+	case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
+		// TODO: find a better way to detect when the process didn't exit as expected!
		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
	}
 }
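Note on the pattern above: switch err := prom.Process.Kill(); { ... } is Go's switch with an init statement. The variable err is scoped to the switch, and because there is no tag expression, the cases are boolean conditions evaluated top to bottom, exactly like the if/else-if chain they replace. A minimal runnable sketch, with a hypothetical run helper standing in for the real call:

	package main

	import (
		"errors"
		"fmt"
	)

	// run is an illustrative stand-in for a call like prom.Process.Kill().
	func run() error { return errors.New("signal: interrupt") }

	func main() {
		// The init statement declares err for the duration of the switch;
		// each tagless case is a boolean tried in order.
		switch err := run(); {
		case err == nil:
			fmt.Println("no error at all")
		case err.Error() != "signal: interrupt":
			fmt.Println("unexpected error:", err)
		default:
			fmt.Println("interrupted, as expected")
		}
	}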
@@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) {
	})
 }
 
-func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
	samples := []backfillSample{}
	for ss.Next() {
@@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
 }
 
 // loadGroups parses groups from a list of recording rule files.
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
+func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
	if errs != nil {
		return errs
@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
	samples model.Matrix
 }
 
-func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
+func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
	return mockAPI.samples, v1.Warnings{}, nil
 }
 
@@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
	}
 }
 
-func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
+func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
	logger := log.NewNopLogger()
	cfg := ruleImporterConfig{
		outputDir: tmpDir,
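The ctx-to-underscore renames in these promtool hunks use Go's blank identifier: a parameter named _ keeps the signature compatible with callers and interfaces while documenting that the value is deliberately ignored, which is what revive's unused-parameter rule asks to be made explicit. A small sketch; the loader callback type is illustrative, not part of the commit:

	package main

	import (
		"context"
		"fmt"
	)

	// loader is an assumed callback type whose signature demands a
	// context even when an implementation has no use for it.
	type loader func(ctx context.Context, names []string) int

	// load ignores its context; the blank identifier makes that
	// intent visible without changing the signature.
	func load(_ context.Context, names []string) int {
		return len(names)
	}

	func main() {
		var l loader = load
		fmt.Println(l(context.Background(), []string{"rules1.yml", "rules2.yml"}))
	}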
@@ -403,14 +403,15 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
		return nil, nil, err
	}
	var block tsdb.BlockReader
-	if blockID != "" {
+	switch {
+	case blockID != "":
		for _, b := range blocks {
			if b.Meta().ULID.String() == blockID {
				block = b
				break
			}
		}
-	} else if len(blocks) > 0 {
+	case len(blocks) > 0:
		block = blocks[len(blocks)-1]
	}
	if block == nil {
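The openBlock hunk is the first of many in this commit that rewrite a trailing if/else-if ladder as a tagless switch. Cases run top to bottom and at most one fires, so behavior is unchanged; the gain is readability. A simplified, self-contained sketch of the same selection logic, using plain strings instead of the real tsdb block type:

	package main

	import "fmt"

	// pick mirrors openBlock's selection: prefer an explicit match,
	// otherwise fall back to the most recent entry.
	func pick(blocks []string, blockID string) string {
		var block string
		switch {
		case blockID != "":
			for _, b := range blocks {
				if b == blockID {
					block = b
					break
				}
			}
		case len(blocks) > 0:
			block = blocks[len(blocks)-1]
		}
		return block
	}

	func main() {
		fmt.Println(pick([]string{"a", "b", "c"}, ""))  // "c"
		fmt.Println(pick([]string{"a", "b", "c"}, "b")) // "b"
	}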
@@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
	return d
 }
 
-func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
	if d.ec2 != nil {
		return d.ec2, nil
	}
@@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
	for _, lname := range conf.NameList(name) {
		response, err := lookupFromAnyServer(lname, qtype, conf, logger)
 
-		if err != nil {
+		switch {
+		case err != nil:
			// We can't go home yet, because a later name
			// may give us a valid, successful answer. However
			// we can no longer say "this name definitely doesn't
			// exist", because we did not get that answer for
			// at least one name.
			allResponsesValid = false
-		} else if response.Rcode == dns.RcodeSuccess {
+		case response.Rcode == dns.RcodeSuccess:
			// Outcome 1: GOLD!
			return response, nil
		}
	}
 
	if allResponsesValid {
-		// Outcome 2: everyone says NXDOMAIN, that's good enough for me
+		// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
		return &dns.Msg{}, nil
	}
	// Outcome 3: boned.
@@ -59,7 +59,7 @@ type hcloudDiscovery struct {
 }
 
 // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
-func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
+func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
	d := &hcloudDiscovery{
		port: conf.Port,
	}
@@ -51,7 +51,7 @@ type robotDiscovery struct {
 }
 
 // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
	d := &robotDiscovery{
		port:     conf.Port,
		endpoint: conf.robotEndpoint,
@@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
	return d, nil
 }
 
-func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
	if err != nil {
		return nil, err
@@ -60,7 +60,7 @@ type serverDiscovery struct {
	datacenterID string
 }
 
-func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
	d := &serverDiscovery{
		port:         conf.Port,
		datacenterID: conf.DatacenterID,
@@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
	)
 }
 
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
+func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
 
-func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
+func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
 
@@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr
	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
 
-func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
+func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
	// Retries are not used so the metric is omitted.
	return noopMetric{}
 }
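NewRetriesMetric(string) above shows the step beyond the blank identifier: Go allows dropping a parameter's name entirely, leaving only its type, and the method still satisfies the interface it implements. A sketch with an assumed CounterMetric interface modeled loosely on the workqueue one:

	package main

	import "fmt"

	type CounterMetric interface{ Inc() }

	type noop struct{}

	func (noop) Inc() {}

	type provider struct{}

	// The name parameter is dropped entirely; only its type remains,
	// yet the method still matches interfaces expecting this signature.
	func (provider) NewRetriesMetric(string) CounterMetric {
		return noop{}
	}

	func main() {
		m := provider{}.NewRetriesMetric("ignored")
		m.Inc()
		fmt.Println("ok")
	}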
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many legitimately empty blocks in this file.
 package kubernetes
 
 import (
@@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
	}
 
	go func() {
-		for e.process(ctx, ch) {
+		for e.process(ctx, ch) { // nolint:revive
		}
	}()
 
@@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	}
 
	go func() {
-		for i.process(ctx, ch) {
+		for i.process(ctx, ch) { // nolint:revive
		}
	}()
 
@@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
		err          error
		ownNamespace string
	)
-	if conf.KubeConfig != "" {
+	switch {
+	case conf.KubeConfig != "":
		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
		if err != nil {
			return nil, err
		}
-	} else if conf.APIServer.URL == nil {
+	case conf.APIServer.URL == nil:
		// Use the Kubernetes provided pod service account
		// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
		kcfg, err = rest.InClusterConfig()
@@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
		}
 
		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
-	} else {
+	default:
		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
		if err != nil {
			return nil, err
@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	}
 
	go func() {
-		for n.process(ctx, ch) {
+		for n.process(ctx, ch) { // nolint:revive
		}
	}()
 
@@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	}
 
	go func() {
-		for p.process(ctx, ch) {
+		for p.process(ctx, ch) { // nolint:revive
		}
	}()
 
@@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	}
 
	go func() {
-		for s.process(ctx, ch) {
+		for s.process(ctx, ch) { // nolint:revive
		}
	}()
 
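The five near-identical kubernetes hunks above (EndpointSlice, Ingress, Node, Pod, Service) all annotate the same idiom: a loop whose entire work happens in its condition, leaving a legitimately empty body that revive's empty-block rule would otherwise flag, which is what the nolint:revive comments acknowledge. A sketch of the shape:

	package main

	import "fmt"

	var n int

	// step does the real work and reports whether to continue,
	// standing in for calls like e.process(ctx, ch).
	func step() bool {
		n++
		return n < 3
	}

	func main() {
		// All the work is in the condition; the body is
		// intentionally empty.
		for step() { // nolint:revive
		}
		fmt.Println("iterations:", n)
	}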
@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
			case tgs := <-provUpdates:
				discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
				for _, got := range discoveryManager.allGroups() {
-					assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
-						return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
-							x,
-							got,
-							expected)
-					})
+					assertEqualGroups(t, got, tc.expectedTargets[x])
				}
			}
		}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
	}
 }
 
-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
	t.Helper()
 
	// Need to sort by the groups' source as the received order is not guaranteed.
@@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
				if _, ok := tgs[k]; !ok {
					t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
				}
-				assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
-					return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
-				})
+				assertEqualGroups(t, tgs[k], expected.tgs[k])
			}
		}
	}
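Dropping the msg callback from assertEqualGroups appears to lean on t.Helper(): a helper marked this way reports failures at the caller's line, so hand-rolled mismatch messages add little on top. A hedged sketch of such a helper, simplified to string slices rather than the real targetgroup types:

	package example

	import (
		"reflect"
		"testing"
	)

	// assertEqual marks itself as a helper, so a failure is reported
	// at the calling test's line; that is what makes a custom message
	// callback largely redundant.
	func assertEqual(t *testing.T, got, expected []string) {
		t.Helper()
		if !reflect.DeepEqual(got, expected) {
			t.Errorf("targets mismatch\ngot: %v\nexpected: %v", got, expected)
		}
	}

	func TestAssertEqual(t *testing.T) {
		assertEqual(t, []string{"a"}, []string{"a"})
	}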
@@ -250,19 +250,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
				continue
			}
 
-			if detailedIP.Public && publicIPv4 == "" {
+			switch {
+			case detailedIP.Public && publicIPv4 == "":
				publicIPv4 = detailedIP.Address
 
				if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
					publicIPv4RDNS = detailedIP.RDNS
				}
-			} else if !detailedIP.Public && privateIPv4 == "" {
+			case !detailedIP.Public && privateIPv4 == "":
				privateIPv4 = detailedIP.Address
 
				if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
					privateIPv4RDNS = detailedIP.RDNS
				}
-			} else {
+			default:
				extraIPs = append(extraIPs, detailedIP.Address)
			}
		}
@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
			case tgs := <-provUpdates:
				discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
				for _, got := range discoveryManager.allGroups() {
-					assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
-						return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
-							x,
-							got,
-							expected)
-					})
+					assertEqualGroups(t, got, tc.expectedTargets[x])
				}
			}
		}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
	}
 }
 
-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
	t.Helper()
 
	// Need to sort by the groups' source as the received order is not guaranteed.
@@ -1129,7 +1124,7 @@ type lockStaticConfig struct {
 }
 
 func (s lockStaticConfig) Name() string { return "lockstatic" }
-func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
+func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
	return (lockStaticDiscoverer)(s), nil
 }
 
@@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
				if _, ok := tgs[k]; !ok {
					t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
				}
-				assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
-					return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
-				})
+				assertEqualGroups(t, tgs[k], expected.tgs[k])
			}
		}
	}
@@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 
 // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
 // ApplyConfig happens at the same time as targets update.
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
+func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	discoveryManager := NewManager(ctx, log.NewNopLogger())
@@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
		return nil, err
	}
 
-	if len(conf.AuthToken) > 0 {
+	switch {
+	case len(conf.AuthToken) > 0:
		rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
-	} else if len(conf.AuthTokenFile) > 0 {
+	case len(conf.AuthTokenFile) > 0:
		rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
	}
	if err != nil {
@@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet {
	var labels []map[string]string
	var prefix string
 
-	if len(app.Container.PortMappings) != 0 {
+	switch {
+	case len(app.Container.PortMappings) != 0:
		// In Marathon 1.5.x the "container.docker.portMappings" object was moved
		// to "container.portMappings".
		ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet())
		prefix = portMappingLabelPrefix
 
-	} else if len(app.Container.Docker.PortMappings) != 0 {
+	case len(app.Container.Docker.PortMappings) != 0:
		// Prior to Marathon 1.5 the port mappings could be found at the path
		// "container.docker.portMappings".
		ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet())
		prefix = portMappingLabelPrefix
 
-	} else if len(app.PortDefinitions) != 0 {
+	case len(app.PortDefinitions) != 0:
		// PortDefinitions deprecates the "ports" array and can be used to specify
		// a list of ports with metadata in case a mapping is not required.
		ports = make([]uint32, len(app.PortDefinitions))
@@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
	return d, nil
 }
 
-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
	opts := &nomad.QueryOptions{
		AllowStale: d.allowStale,
	}
@@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string {
	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }
 
-func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
	client, err := createClient(d.config)
	if err != nil {
		return nil, err
@@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string {
	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }
 
-func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
	client, err := createClient(d.config)
	if err != nil {
		return nil, err
@@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro
 
		if meta.Links.Next == "" {
			break
-		} else {
-			listOptions.Cursor = meta.Links.Next
-			continue
		}
+		listOptions.Cursor = meta.Links.Next
	}
 
	return instances, nil
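The vultr hunk applies what revive calls indent-error-flow: once one branch ends in break (or return/continue), the else wrapper is redundant and its body can be outdented to the loop level. A sketch of the resulting pagination shape; the in-memory page map is purely illustrative:

	package main

	import "fmt"

	func main() {
		// Fake cursor-based pagination: each cursor maps to the next.
		pages := map[string]string{"": "p2", "p2": "p3", "p3": ""}
		cursor := ""
		for {
			next := pages[cursor]
			fmt.Println("fetched page, next cursor:", next)
			if next == "" {
				break
			}
			// No else needed: break already left the loop above.
			cursor = next
		}
	}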
@@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	}
	for _, pathUpdate := range d.pathUpdates {
		// Drain event channel in case the treecache leaks goroutines otherwise.
-		for range pathUpdate {
+		for range pathUpdate { // nolint:revive
		}
	}
	d.conn.Close()
@@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample {
	result := make([]prompb.Sample, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
-		if a[i].Timestamp < b[j].Timestamp {
+		switch {
+		case a[i].Timestamp < b[j].Timestamp:
			result = append(result, a[i])
			i++
-		} else if a[i].Timestamp > b[j].Timestamp {
+		case a[i].Timestamp > b[j].Timestamp:
			result = append(result, b[j])
			j++
-		} else {
+		default:
			result = append(result, a[i])
			i++
			j++
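mergeSamples is a two-pointer merge of time-ordered samples, and its three-way timestamp comparison is exactly the case a tagless switch expresses more clearly than a chained if. A self-contained sketch over plain int timestamps, with prompb.Sample reduced to an int:

	package main

	import "fmt"

	// merge combines two ascending slices, keeping the left value when
	// timestamps collide, mirroring the logic of mergeSamples above.
	func merge(a, b []int) []int {
		result := make([]int, 0, len(a)+len(b))
		i, j := 0, 0
		for i < len(a) && j < len(b) {
			switch {
			case a[i] < b[j]:
				result = append(result, a[i])
				i++
			case a[i] > b[j]:
				result = append(result, b[j])
				j++
			default: // Equal: take one copy and advance both.
				result = append(result, a[i])
				i++
				j++
			}
		}
		result = append(result, a[i:]...)
		return append(result, b[j:]...)
	}

	func main() {
		fmt.Println(merge([]int{1, 3, 5}, []int{2, 3, 6})) // [1 2 3 5 6]
	}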
@@ -824,10 +824,11 @@ mergeLoop: // Merge together all buckets from the original schema that fall into
			origIdx += span.Offset
		}
		currIdx := i.targetIdx(origIdx)
-		if firstPass {
+		switch {
+		case firstPass:
			i.currIdx = currIdx
			firstPass = false
-		} else if currIdx != i.currIdx {
+		case currIdx != i.currIdx:
			// Reached next bucket in targetSchema.
			// Do not actually forward to the next bucket, but break out.
			break mergeLoop
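Here the if-to-switch rewrite interacts with break semantics: inside a switch, a bare break exits the switch rather than the surrounding loop, so escaping the loop requires a labeled break like the pre-existing mergeLoop label. A minimal sketch:

	package main

	import "fmt"

	func main() {
	loop:
		for i := 0; i < 10; i++ {
			switch {
			case i < 3:
				fmt.Println("keep going:", i)
			default:
				// A bare break here would only leave the switch;
				// the label is needed to leave the for loop.
				break loop
			}
		}
		fmt.Println("done")
	}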
@@ -169,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
	b = b[:0]
	i, j := 0, 0
	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
			i++
-		} else {
+		default:
			b = append(b, ls[i].Name...)
			b = append(b, seps[0])
			b = append(b, ls[i].Value...)
@@ -213,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
	b.WriteByte(labelSep)
	i, j := 0, 0
	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
+		switch {
+		case names[j] < ls[i].Name:
			j++
-		} else if ls[i].Name < names[j] {
+		case ls[i].Name < names[j]:
			i++
-		} else {
+		default:
			if b.Len() > 1 {
				b.WriteByte(seps[0])
			}
@@ -238,9 +238,10 @@ func (p *PromParser) Metric(l *labels.Labels) string {
	return s
 }
 
-// Exemplar writes the exemplar of the current sample into the passed
-// exemplar. It returns if an exemplar exists.
-func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
+// Exemplar implements the Parser interface. However, since the classic
+// Prometheus text format does not support exemplars, this implementation simply
+// returns false and does nothing else.
+func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
	return false
 }
 
@@ -27,7 +27,7 @@ import (
	"github.com/prometheus/prometheus/util/teststorage"
 )
 
-func setupRangeQueryTestData(stor *teststorage.TestStorage, engine *Engine, interval, numIntervals int) error {
+func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error {
	metrics := []labels.Labels{}
	metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
	metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
@@ -400,7 +400,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
 }
 
 // NewInstantQuery returns an evaluation query for the given expression at the given time.
-func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
+func (ng *Engine) NewInstantQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) {
	expr, err := parser.ParseExpr(qs)
	if err != nil {
		return nil, err
@@ -416,7 +416,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
 
 // NewRangeQuery returns an evaluation query for the given time range and with
 // the resolution set by the interval.
-func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
+func (ng *Engine) NewRangeQuery(_ context.Context, q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) {
	expr, err := parser.ParseExpr(qs)
	if err != nil {
		return nil, err
@@ -1957,7 +1957,7 @@ func (ev *evaluator) matrixIterSlice(
		// (b) the number of samples is relatively small.
		// so a linear search will be as fast as a binary search.
		var drop int
-		for drop = 0; floats[drop].T < mint; drop++ {
+		for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive
		}
		ev.currentSamples -= drop
		copy(floats, floats[drop:])
@@ -1979,7 +1979,7 @@ func (ev *evaluator) matrixIterSlice(
		// (b) the number of samples is relatively small.
		// so a linear search will be as fast as a binary search.
		var drop int
-		for drop = 0; histograms[drop].T < mint; drop++ {
+		for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive
		}
		ev.currentSamples -= drop
		copy(histograms, histograms[drop:])
@@ -2096,13 +2096,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching,
 }
 
 func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
-	if matching.Card != parser.CardManyToMany {
+	switch {
+	case matching.Card != parser.CardManyToMany:
		panic("set operations must only use many-to-many matching")
-	}
-	if len(lhs) == 0 { // Short-circuit.
+	case len(lhs) == 0: // Short-circuit.
		enh.Out = append(enh.Out, rhs...)
		return enh.Out
-	} else if len(rhs) == 0 {
+	case len(rhs) == 0:
		enh.Out = append(enh.Out, lhs...)
		return enh.Out
	}
@@ -2221,13 +2221,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
			hl, hr = hr, hl
		}
		floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr)
-		if returnBool {
+		switch {
+		case returnBool:
			if keep {
				floatValue = 1.0
			} else {
				floatValue = 0.0
			}
-		} else if !keep {
+		case !keep:
			continue
		}
		metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
@@ -2514,14 +2515,15 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
		if !ok {
			var m labels.Labels
			enh.resetBuilder(metric)
-			if without {
+			switch {
+			case without:
				enh.lb.Del(grouping...)
				enh.lb.Del(labels.MetricName)
				m = enh.lb.Labels()
-			} else if len(grouping) > 0 {
+			case len(grouping) > 0:
				enh.lb.Keep(grouping...)
				m = enh.lb.Labels()
-			} else {
+			default:
				m = labels.EmptyLabels()
			}
			newAgg := &groupedAggregation{
@@ -2530,9 +2532,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
				mean:       s.F,
				groupCount: 1,
			}
-			if s.H == nil {
+			switch {
+			case s.H == nil:
				newAgg.hasFloat = true
-			} else if op == parser.SUM {
+			case op == parser.SUM:
				newAgg.histogramValue = s.H.Copy()
				newAgg.hasHistogram = true
			}
@@ -2542,9 +2545,10 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
		inputVecLen := int64(len(vec))
		resultSize := k
-		if k > inputVecLen {
+		switch {
+		case k > inputVecLen:
			resultSize = inputVecLen
-		} else if k == 0 {
+		case k == 0:
			resultSize = 1
		}
		switch op {
@@ -2637,12 +2641,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
		case parser.TOPK:
			// We build a heap of up to k elements, with the smallest element at heap[0].
-			if int64(len(group.heap)) < k {
+			switch {
+			case int64(len(group.heap)) < k:
				heap.Push(&group.heap, &Sample{
					F:      s.F,
					Metric: s.Metric,
				})
-			} else if group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)) {
+			case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
				// This new element is bigger than the previous smallest element - overwrite that.
				group.heap[0] = Sample{
					F:      s.F,
@@ -2655,12 +2660,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
 
		case parser.BOTTOMK:
			// We build a heap of up to k elements, with the biggest element at heap[0].
-			if int64(len(group.reverseHeap)) < k {
+			switch {
+			case int64(len(group.reverseHeap)) < k:
				heap.Push(&group.reverseHeap, &Sample{
					F:      s.F,
					Metric: s.Metric,
				})
-			} else if group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)) {
+			case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)):
				// This new element is smaller than the previous biggest element - overwrite that.
				group.reverseHeap[0] = Sample{
					F:      s.F,
@@ -2819,9 +2825,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr {
 func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
	switch n := expr.(type) {
	case *parser.VectorSelector:
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
		}
		return n.Timestamp != nil
@@ -2878,9 +2885,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool {
		if isInvariant {
			n.Expr = newStepInvariantExpr(n.Expr)
		}
-		if n.StartOrEnd == parser.START {
+		switch n.StartOrEnd {
+		case parser.START:
			n.Timestamp = makeInt64Pointer(timestamp.FromTime(start))
-		} else if n.StartOrEnd == parser.END {
+		case parser.END:
			n.Timestamp = makeInt64Pointer(timestamp.FromTime(end))
		}
		return n.Timestamp != nil
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unused function arguments in this file by design.
 package promql
 
 import (
@@ -803,12 +804,14 @@ func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) V
 // === sgn(Vector parser.ValueTypeVector) Vector ===
 func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, func(v float64) float64 {
-		if v < 0 {
+		switch {
+		case v < 0:
			return -1
-		} else if v > 0 {
+		case v > 0:
			return 1
+		default:
+			return v
		}
-		return v
	})
 }
 
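In funcSgn the trailing return v moves into a default case, so every outcome is spelled out inside one switch and Go's termination analysis can see that all paths return. A standalone sketch of the resulting shape:

	package main

	import "fmt"

	// sgn returns -1, 1, or v itself (covering 0 and NaN), with every
	// path expressed as a switch case so no trailing return is needed.
	func sgn(v float64) float64 {
		switch {
		case v < 0:
			return -1
		case v > 0:
			return 1
		default:
			return v
		}
	}

	func main() {
		fmt.Println(sgn(-3.5), sgn(2), sgn(0))
	}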
@@ -368,13 +368,14 @@ func Children(node Node) []Node {
	case *AggregateExpr:
		// While this does not look nice, it should avoid unnecessary allocations
		// caused by slice resizing
-		if n.Expr == nil && n.Param == nil {
+		switch {
+		case n.Expr == nil && n.Param == nil:
			return nil
-		} else if n.Expr == nil {
+		case n.Expr == nil:
			return []Node{n.Param}
-		} else if n.Param == nil {
+		case n.Param == nil:
			return []Node{n.Expr}
-		} else {
+		default:
			return []Node{n.Expr, n.Param}
		}
	case *BinaryExpr:
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many legitimately empty blocks in this file.
 package parser
 
 import (
@@ -293,7 +294,7 @@ func (l *Lexer) accept(valid string) bool {
 // acceptRun consumes a run of runes from the valid set.
 func (l *Lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
-		// consume
+		// Consume.
	}
	l.backup()
 }
|
||||||
|
|
||||||
switch r := l.next(); {
|
switch r := l.next(); {
|
||||||
case r == eof:
|
case r == eof:
|
||||||
if l.parenDepth != 0 {
|
switch {
|
||||||
|
case l.parenDepth != 0:
|
||||||
return l.errorf("unclosed left parenthesis")
|
return l.errorf("unclosed left parenthesis")
|
||||||
} else if l.bracketOpen {
|
case l.bracketOpen:
|
||||||
return l.errorf("unclosed left bracket")
|
return l.errorf("unclosed left bracket")
|
||||||
}
|
}
|
||||||
l.emit(EOF)
|
l.emit(EOF)
|
||||||
|
@ -370,12 +372,13 @@ func lexStatements(l *Lexer) stateFn {
|
||||||
case r == '^':
|
case r == '^':
|
||||||
l.emit(POW)
|
l.emit(POW)
|
||||||
case r == '=':
|
case r == '=':
|
||||||
if t := l.peek(); t == '=' {
|
switch t := l.peek(); t {
|
||||||
|
case '=':
|
||||||
l.next()
|
l.next()
|
||||||
l.emit(EQLC)
|
l.emit(EQLC)
|
||||||
} else if t == '~' {
|
case '~':
|
||||||
return l.errorf("unexpected character after '=': %q", t)
|
return l.errorf("unexpected character after '=': %q", t)
|
||||||
} else {
|
default:
|
||||||
l.emit(EQL)
|
l.emit(EQL)
|
||||||
}
|
}
|
||||||
case r == '!':
|
case r == '!':
|
||||||
|
@ -790,11 +793,12 @@ Loop:
|
||||||
default:
|
default:
|
||||||
l.backup()
|
l.backup()
|
||||||
word := l.input[l.start:l.pos]
|
word := l.input[l.start:l.pos]
|
||||||
if kw, ok := key[strings.ToLower(word)]; ok {
|
switch kw, ok := key[strings.ToLower(word)]; {
|
||||||
|
case ok:
|
||||||
l.emit(kw)
|
l.emit(kw)
|
||||||
} else if !strings.Contains(word, ":") {
|
case !strings.Contains(word, ":"):
|
||||||
l.emit(IDENTIFIER)
|
l.emit(IDENTIFIER)
|
||||||
} else {
|
default:
|
||||||
l.emit(METRIC_IDENTIFIER)
|
l.emit(METRIC_IDENTIFIER)
|
||||||
}
|
}
|
||||||
break Loop
|
break Loop
|
||||||
|
|
|
@@ -270,14 +270,15 @@ var errUnexpected = errors.New("unexpected error")
 // recover is the handler that turns panics into returns from the top level of Parse.
 func (p *parser) recover(errp *error) {
	e := recover()
-	if _, ok := e.(runtime.Error); ok {
+	switch _, ok := e.(runtime.Error); {
+	case ok:
		// Print the stack trace but do not inhibit the running application.
		buf := make([]byte, 64<<10)
		buf = buf[:runtime.Stack(buf, false)]
 
		fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
		*errp = errUnexpected
-	} else if e != nil {
+	case e != nil:
		*errp = e.(error)
	}
 }
@@ -332,7 +333,7 @@ func (p *parser) Lex(lval *yySymType) int {
 // It is a no-op since the parser's error routines are triggered
 // by mechanisms that allow more fine-grained control
 // For more information, see https://pkg.go.dev/golang.org/x/tools/cmd/goyacc.
-func (p *parser) Error(e string) {
+func (p *parser) Error(string) {
 }
 
 // InjectItem allows injecting a single Item at the beginning of the token stream
@@ -481,9 +482,9 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
		// This is made a function instead of a variable, so it is lazily evaluated on demand.
		opRange := func() (r PositionRange) {
			// Remove whitespace at the beginning and end of the range.
-			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ {
+			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive
			}
-			for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- {
+			for r.End = n.RHS.PositionRange().Start - 1; isSpace(rune(p.lex.input[r.End])); r.End-- { // nolint:revive
			}
			return
		}
@@ -518,20 +519,18 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
			p.addParseErrf(n.RHS.PositionRange(), "binary expression must contain only scalar and instant vector types")
		}
 
-		if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil {
+		switch {
+		case (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil:
			if len(n.VectorMatching.MatchingLabels) > 0 {
				p.addParseErrf(n.PositionRange(), "vector matching only allowed between instant vectors")
			}
			n.VectorMatching = nil
-		} else {
-			// Both operands are Vectors.
-			if n.Op.IsSetOperator() {
-				if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
-					p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
-				}
-				if n.VectorMatching.Card != CardManyToMany {
-					p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
-				}
+		case n.Op.IsSetOperator(): // Both operands are Vectors.
+			if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne {
+				p.addParseErrf(n.PositionRange(), "no grouping allowed for %q operation", n.Op)
+			}
+			if n.VectorMatching.Card != CardManyToMany {
+				p.addParseErrf(n.PositionRange(), "set operations must always be many-to-many")
			}
		}
 
@@ -708,9 +707,10 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
	}
 
	// it is already ensured by parseDuration func that there never will be a zero offset modifier
-	if *orgoffsetp != 0 {
+	switch {
+	case *orgoffsetp != 0:
		p.addParseErrf(e.PositionRange(), "offset may not be set multiple times")
-	} else if orgoffsetp != nil {
+	case orgoffsetp != nil:
		*orgoffsetp = offset
	}
 
@@ -124,17 +124,19 @@ func (node *MatrixSelector) String() string {
	// Copy the Vector selector before changing the offset
	vecSelector := *node.VectorSelector.(*VectorSelector)
	offset := ""
-	if vecSelector.OriginalOffset > time.Duration(0) {
+	switch {
+	case vecSelector.OriginalOffset > time.Duration(0):
		offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset))
-	} else if vecSelector.OriginalOffset < time.Duration(0) {
+	case vecSelector.OriginalOffset < time.Duration(0):
		offset = fmt.Sprintf(" offset -%s", model.Duration(-vecSelector.OriginalOffset))
	}
	at := ""
-	if vecSelector.Timestamp != nil {
+	switch {
+	case vecSelector.Timestamp != nil:
		at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0)
-	} else if vecSelector.StartOrEnd == START {
+	case vecSelector.StartOrEnd == START:
		at = " @ start()"
-	} else if vecSelector.StartOrEnd == END {
+	case vecSelector.StartOrEnd == END:
		at = " @ end()"
	}
 
@@ -162,17 +164,19 @@ func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
		step = model.Duration(node.Step).String()
	}
	offset := ""
-	if node.OriginalOffset > time.Duration(0) {
+	switch {
+	case node.OriginalOffset > time.Duration(0):
		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
-	} else if node.OriginalOffset < time.Duration(0) {
+	case node.OriginalOffset < time.Duration(0):
		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
	}
	at := ""
-	if node.Timestamp != nil {
+	switch {
+	case node.Timestamp != nil:
		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
-	} else if node.StartOrEnd == START {
+	case node.StartOrEnd == START:
		at = " @ start()"
-	} else if node.StartOrEnd == END {
+	case node.StartOrEnd == END:
		at = " @ end()"
	}
	return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset)
@@ -207,17 +211,19 @@ func (node *VectorSelector) String() string {
		labelStrings = append(labelStrings, matcher.String())
	}
	offset := ""
-	if node.OriginalOffset > time.Duration(0) {
+	switch {
+	case node.OriginalOffset > time.Duration(0):
		offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset))
-	} else if node.OriginalOffset < time.Duration(0) {
+	case node.OriginalOffset < time.Duration(0):
		offset = fmt.Sprintf(" offset -%s", model.Duration(-node.OriginalOffset))
	}
	at := ""
-	if node.Timestamp != nil {
+	switch {
+	case node.Timestamp != nil:
		at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0)
-	} else if node.StartOrEnd == START {
+	case node.StartOrEnd == START:
		at = " @ start()"
-	} else if node.StartOrEnd == END {
+	case node.StartOrEnd == END:
		at = " @ end()"
	}
 
@@ -169,11 +169,12 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
		}
	}
	if bucket.Lower < 0 && bucket.Upper > 0 {
-		if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+		switch {
+		case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
			// The result is in the zero bucket and the histogram has only
			// positive buckets. So we consider 0 to be the lower bound.
			bucket.Lower = 0
-		} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+		case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
			// The result is in the zero bucket and the histogram has only
			// negative buckets. So we consider 0 to be the upper bound.
			bucket.Upper = 0
@@ -244,12 +245,13 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
	for it.Next() {
		b := it.At()
		if b.Lower < 0 && b.Upper > 0 {
-			if len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0 {
+			switch {
+			case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
				// This is the zero bucket and the histogram has only
				// positive buckets. So we consider 0 to be the lower
				// bound.
				b.Lower = 0
-			} else if len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0 {
+			case len(h.PositiveBuckets) == 0 && len(h.NegativeBuckets) > 0:
				// This is in the zero bucket and the histogram has only
				// negative buckets. So we consider 0 to be the upper
				// bound.
@@ -587,10 +587,10 @@ func TestAlertingRuleLimit(t *testing.T) {
 	evalTime := time.Unix(0, 0)

 	for _, test := range tests {
-		_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
-		if err != nil {
+		switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
+		case err != nil:
 			require.EqualError(t, err, test.err)
-		} else if test.err != "" {
+		case test.err != "":
 			t.Errorf("Expected error %s, got none", test.err)
 		}
 	}
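Several conversions in this commit fold the preceding assignment into the switch itself, as in `switch _, err := rule.Eval(...); {` above. A minimal sketch of that init-statement form, with an illustrative path:

package main

import (
	"fmt"
	"os"
)

func main() {
	// The init statement scopes err to the switch, so it cannot leak
	// into (or shadow anything in) the surrounding function.
	switch _, err := os.Stat("/nonexistent/path"); {
	case os.IsNotExist(err):
		fmt.Println("missing")
	case err != nil:
		fmt.Println("stat failed:", err)
	default:
		fmt.Println("exists")
	}
}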
@@ -866,12 +866,13 @@ func (g *Group) RestoreForState(ts time.Time) {
 			timeSpentPending := downAt.Sub(restoredActiveAt)
 			timeRemainingPending := alertHoldDuration - timeSpentPending

-			if timeRemainingPending <= 0 {
+			switch {
+			case timeRemainingPending <= 0:
 				// It means that alert was firing when prometheus went down.
 				// In the next Eval, the state of this alert will be set back to
 				// firing again if it's still firing in that Eval.
 				// Nothing to be done in this case.
-			} else if timeRemainingPending < g.opts.ForGracePeriod {
+			case timeRemainingPending < g.opts.ForGracePeriod:
 				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
 				//                          /* new firing time */ /* moving back by hold duration */
 				//
@@ -884,7 +885,7 @@ func (g *Group) RestoreForState(ts time.Time) {
 				//              = (ts + m.opts.ForGracePeriod) - ts
 				//              = m.opts.ForGracePeriod
 				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
-			} else {
+			default:
 				// By shifting ActiveAt to the future (ActiveAt + some_duration),
 				// the total pending time from the original ActiveAt
 				// would be `alertHoldDuration + some_duration`.
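The comment block above derives the restored activation time: firing is postponed so that at least ForGracePeriod remains before the alert can fire again. A hedged sketch of just that arithmetic, with toy values rather than the rules package:

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Unix(1000, 0)         // Restore time.
	holdDuration := 10 * time.Minute // The alert's `for` duration.
	gracePeriod := 2 * time.Minute   // Minimum remaining pending time.

	// restoredActiveAt = (ts + gracePeriod) - holdDuration, so the alert
	// fires no earlier than ts + gracePeriod.
	restoredActiveAt := ts.Add(gracePeriod).Add(-holdDuration)
	firesAt := restoredActiveAt.Add(holdDuration)
	fmt.Println(firesAt.Sub(ts) == gracePeriod) // true
}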
@@ -481,17 +481,18 @@ func TestForStateRestore(t *testing.T) {
 		})

 		// Checking if we have restored it correctly.
-		if tst.noRestore {
+		switch {
+		case tst.noRestore:
 			require.Equal(t, tst.num, len(got))
 			for _, e := range got {
 				require.Equal(t, e.ActiveAt, restoreTime)
 			}
-		} else if tst.gracePeriod {
+		case tst.gracePeriod:
 			require.Equal(t, tst.num, len(got))
 			for _, e := range got {
 				require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
 			}
-		} else {
+		default:
 			exp := tst.alerts
 			require.Equal(t, len(exp), len(got))
 			sortAlerts(exp)
@@ -779,13 +780,13 @@ func TestUpdate(t *testing.T) {
 			rgs.Groups[i].Interval = model.Duration(10)
 		}
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)

 	// Update limit and reload.
 	for i := range rgs.Groups {
 		rgs.Groups[i].Limit = 1
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)

 	// Change group rules and reload.
 	for i, g := range rgs.Groups {
@@ -793,7 +794,7 @@ func TestUpdate(t *testing.T) {
 			rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value))
 		}
 	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, ogs)
 }

 // ruleGroupsTest for running tests over rules.
@@ -836,7 +837,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
 		}
 	}

-func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
+func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, ogs map[string]*Group) {
 	bs, err := yaml.Marshal(formatRules(rgs))
 	require.NoError(t, err)
 	tmpFile.Seek(0, 0)
@@ -30,19 +30,19 @@ type unknownRule struct{}

 func (u unknownRule) Name() string          { return "" }
 func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() }
-func (u unknownRule) Eval(ctx context.Context, time time.Time, queryFunc QueryFunc, url *url.URL, i int) (promql.Vector, error) {
+func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) {
 	return nil, nil
 }
 func (u unknownRule) String() string   { return "" }
 func (u unknownRule) Query() parser.Expr { return nil }
-func (u unknownRule) SetLastError(err error) {}
+func (u unknownRule) SetLastError(error) {}
 func (u unknownRule) LastError() error { return nil }
-func (u unknownRule) SetHealth(health RuleHealth) {}
+func (u unknownRule) SetHealth(RuleHealth) {}
 func (u unknownRule) Health() RuleHealth { return "" }
-func (u unknownRule) SetEvaluationDuration(duration time.Duration) {}
+func (u unknownRule) SetEvaluationDuration(time.Duration) {}
 func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
-func (u unknownRule) SetEvaluationTimestamp(time time.Time) {}
+func (u unknownRule) SetEvaluationTimestamp(time.Time) {}
 func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }

 func TestNewRuleDetailPanics(t *testing.T) {
 	require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {
@@ -223,10 +223,10 @@ func TestRecordingRuleLimit(t *testing.T) {
 	evalTime := time.Unix(0, 0)

 	for _, test := range tests {
-		_, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit)
-		if err != nil {
+		switch _, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, test.limit); {
+		case err != nil:
 			require.EqualError(t, err, test.err)
-		} else if test.err != "" {
+		case test.err != "":
 			t.Errorf("Expected error %s, got none", test.err)
 		}
 	}
@@ -288,10 +288,11 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
 	// Cleanup and reload pool if the configuration has changed.
 	var failed bool
 	for name, sp := range m.scrapePools {
-		if cfg, ok := m.scrapeConfigs[name]; !ok {
+		switch cfg, ok := m.scrapeConfigs[name]; {
+		case !ok:
 			sp.stop()
 			delete(m.scrapePools, name)
-		} else if !reflect.DeepEqual(sp.config, cfg) {
+		case !reflect.DeepEqual(sp.config, cfg):
 			err := sp.reload(cfg)
 			if err != nil {
 				level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name)
@@ -503,9 +503,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 			// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
 			nonEmpty := false
 			t.LabelsRange(func(l labels.Label) { nonEmpty = true })
-			if nonEmpty {
+			switch {
+			case nonEmpty:
 				all = append(all, t)
-			} else if !t.discoveredLabels.IsEmpty() {
+			case !t.discoveredLabels.IsEmpty():
 				sp.droppedTargets = append(sp.droppedTargets, t)
 			}
 		}
@@ -946,9 +947,10 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 	count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
 	c.metaMtx.Unlock()

-	if flushCache {
+	switch {
+	case flushCache:
 		c.successfulCount = count
-	} else if count > c.successfulCount*2+1000 {
+	case count > c.successfulCount*2+1000:
 		// If a target had varying labels in scrapes that ultimately failed,
 		// the caches would grow indefinitely. Force a flush when this happens.
 		// We use the heuristic that this is a doubling of the cache size
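The heuristic above forces a flush once the cache grows past twice its last successful size plus a fixed allowance. A hedged standalone sketch of that growth check, with illustrative names only:

package main

import "fmt"

// shouldForceFlush reports whether a cache of size count has grown
// suspiciously relative to the size recorded after the last successful
// scrape: more than a doubling, with a +1000 allowance for small caches.
func shouldForceFlush(count, successfulCount int) bool {
	return count > successfulCount*2+1000
}

func main() {
	fmt.Println(shouldForceFlush(1500, 100)) // true: 1500 > 1200
	fmt.Println(shouldForceFlush(1100, 100)) // false: 1100 <= 1200
}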
@@ -724,9 +724,10 @@ func TestScrapeLoopStop(t *testing.T) {
 	// All samples in a scrape must have the same timestamp.
 	var ts int64
 	for i, s := range appender.result {
-		if i%6 == 0 {
+		switch {
+		case i%6 == 0:
 			ts = s.t
-		} else if s.t != ts {
+		case s.t != ts:
 			t.Fatalf("Unexpected multiple timestamps within single scrape")
 		}
 	}
@@ -1139,10 +1140,11 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
 		numScrapes++

-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\n"))
 			return nil
-		} else if numScrapes == 5 {
+		case 5:
 			cancel()
 		}
 		return errors.New("scrape failed")
@@ -1200,13 +1202,14 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
 		numScrapes++

-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\n"))
 			return nil
-		} else if numScrapes == 2 {
+		case 2:
 			w.Write([]byte("7&-\n"))
 			return nil
-		} else if numScrapes == 3 {
+		case 3:
 			cancel()
 		}
 		return errors.New("scrape failed")
@@ -1265,14 +1268,15 @@ func TestScrapeLoopCache(t *testing.T) {
 	numScrapes := 0

 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
-		if numScrapes == 1 || numScrapes == 2 {
+		switch numScrapes {
+		case 1, 2:
 			if _, ok := sl.cache.series["metric_a"]; !ok {
 				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
 			}
 			if _, ok := sl.cache.series["metric_b"]; !ok {
 				t.Errorf("metric_b missing from cache after scrape %d", numScrapes)
 			}
-		} else if numScrapes == 3 {
+		case 3:
 			if _, ok := sl.cache.series["metric_a"]; !ok {
 				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
 			}
@@ -1283,13 +1287,14 @@ func TestScrapeLoopCache(t *testing.T) {

 		numScrapes++

-		if numScrapes == 1 {
+		switch numScrapes {
+		case 1:
 			w.Write([]byte("metric_a 42\nmetric_b 43\n"))
 			return nil
-		} else if numScrapes == 3 {
+		case 3:
 			w.Write([]byte("metric_a 44\n"))
 			return nil
-		} else if numScrapes == 4 {
+		case 4:
 			cancel()
 		}
 		return fmt.Errorf("scrape failed")
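Where the chain compares one variable against constants, the commit uses an expression switch instead; `case 1, 2:` collapses an `==` / `||` pair into a single arm. A minimal sketch:

package main

import "fmt"

func label(n int) string {
	switch n { // Expression switch: each case compares against n.
	case 1, 2: // Matches either value, like n == 1 || n == 2.
		return "small"
	case 3:
		return "three"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(label(1), label(2), label(3), label(9)) // small small three other
}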
@@ -2280,11 +2285,12 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {

 	go func() {
 		_, err := ts.scrape(ctx, io.Discard)
-		if err == nil {
+		switch {
+		case err == nil:
 			errc <- errors.New("Expected error but got nil")
-		} else if ctx.Err() != context.Canceled {
+		case ctx.Err() != context.Canceled:
 			errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err())
-		} else {
+		default:
 			close(errc)
 		}
 	}()
@@ -2405,7 +2411,7 @@ type testScraper struct {
 	scrapeFunc func(context.Context, io.Writer) error
 }

-func (ts *testScraper) offset(interval time.Duration, jitterSeed uint64) time.Duration {
+func (ts *testScraper) offset(time.Duration, uint64) time.Duration {
 	return ts.offsetDur
 }

@@ -2867,7 +2873,7 @@ func TestScrapeAddFast(t *testing.T) {
 	require.NoError(t, slApp.Commit())
 }

-func TestReuseCacheRace(t *testing.T) {
+func TestReuseCacheRace(*testing.T) {
 	var (
 		app = &nopAppendable{}
 		cfg = &config.ScrapeConfig{
@@ -134,7 +134,7 @@ func TestTargetURL(t *testing.T) {
 	require.Equal(t, expectedURL, target.URL())
 }

-func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels) *Target {
+func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Target {
 	lb := labels.NewBuilder(lbls)
 	lb.Set(model.SchemeLabel, "http")
 	lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))
@@ -188,8 +188,8 @@ func BenchmarkBufferedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()

-	for it.Next() != chunkenc.ValNone {
-		// scan everything
+	for it.Next() != chunkenc.ValNone { // nolint:revive
+		// Scan everything.
 	}
 	require.NoError(b, it.Err())
 }
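Several loops in this commit intentionally have empty or comment-only bodies: all the work happens in the `Next()` condition, and `// nolint:revive` suppresses revive's empty-block warning. A hedged sketch of the drain pattern with a toy iterator, not the real chunkenc types:

package main

import "fmt"

// countingIter is a toy stand-in for a chunk iterator: Next advances
// and reports whether a value remains.
type countingIter struct{ n, limit int }

func (it *countingIter) Next() bool {
	it.n++
	return it.n <= it.limit
}

func main() {
	it := &countingIter{limit: 5}
	for it.Next() { // nolint:revive
		// Drain everything; the work happens in Next().
	}
	fmt.Println(it.n) // 6: the loop stopped on the first false Next().
}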
@@ -222,9 +222,10 @@ func (f *fanoutAppender) Rollback() (err error) {

 	for _, appender := range f.secondaries {
 		rollbackErr := appender.Rollback()
-		if err == nil {
+		switch {
+		case err == nil:
 			err = rollbackErr
-		} else if rollbackErr != nil {
+		case rollbackErr != nil:
 			level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr)
 		}
 	}
@@ -233,7 +233,7 @@ func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage
 	return storage.ErrSeriesSet(errSelect)
 }

-func (errQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
 	return nil, nil, errors.New("label values error")
 }

@@ -99,7 +99,7 @@ type MockQueryable struct {
 	MockQuerier Querier
 }

-func (q *MockQueryable) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
+func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) {
 	return q.MockQuerier, nil
 }

@@ -118,11 +118,11 @@ type MockQuerier struct {
 	SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
 }

-func (q *MockQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) {
 	return nil, nil, nil
 }

-func (q *MockQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) {
 	return nil, nil, nil
 }

@@ -82,8 +82,8 @@ func BenchmarkMemoizedSeriesIterator(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()

-	for it.Next() != chunkenc.ValNone {
-		// scan everything
+	for it.Next() != chunkenc.ValNone { // nolint:revive
+		// Scan everything.
 	}
 	require.NoError(b, it.Err())
 }
@@ -197,13 +197,14 @@ func mergeStrings(a, b []string) []string {
 	res := make([]string, 0, maxl*10/9)

 	for len(a) > 0 && len(b) > 0 {
-		if a[0] == b[0] {
+		switch {
+		case a[0] == b[0]:
 			res = append(res, a[0])
 			a, b = a[1:], b[1:]
-		} else if a[0] < b[0] {
+		case a[0] < b[0]:
 			res = append(res, a[0])
 			a = a[1:]
-		} else {
+		default:
 			res = append(res, b[0])
 			b = b[1:]
 		}
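mergeStrings above (and MergeLabels further down) is a classic two-pointer merge of sorted inputs with deduplication. A hedged, self-contained version of the loop for context, not the production function:

package main

import "fmt"

// mergeSorted merges two sorted string slices, keeping one copy of
// elements that appear in both, mirroring the loop in mergeStrings.
func mergeSorted(a, b []string) []string {
	res := make([]string, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0] == b[0]:
			res = append(res, a[0])
			a, b = a[1:], b[1:]
		case a[0] < b[0]:
			res = append(res, a[0])
			a = a[1:]
		default:
			res = append(res, b[0])
			b = b[1:]
		}
	}
	// At most one input still has elements left.
	res = append(res, a...)
	return append(res, b...)
}

func main() {
	fmt.Println(mergeSorted([]string{"a", "c", "d"}, []string{"b", "c"}))
	// [a b c d]
}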
@@ -722,12 +723,11 @@ func (c *compactChunkIterator) Next() bool {
 			break
 		}

-		if next.MinTime == prev.MinTime &&
-			next.MaxTime == prev.MaxTime &&
-			bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
-			// 1:1 duplicates, skip it.
-		} else {
-			// We operate on same series, so labels does not matter here.
+		// Only do something if it is not a perfect duplicate.
+		if next.MinTime != prev.MinTime ||
+			next.MaxTime != prev.MaxTime ||
+			!bytes.Equal(next.Chunk.Bytes(), prev.Chunk.Bytes()) {
+			// We operate on same series, so labels do not matter here.
 			overlapping = append(overlapping, newChunkToSeriesDecoder(labels.EmptyLabels(), next))
 			if next.MaxTime > oMaxTime {
 				oMaxTime = next.MaxTime
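This hunk removes an empty if branch by negating the condition: `A && B && C` guarding an empty then-branch becomes `!A || !B || !C` guarding the former else-body, which is De Morgan's law applied to the guard. A tiny sketch of the equivalence:

package main

import "fmt"

func main() {
	for _, v := range [][3]bool{{true, true, true}, {true, false, true}} {
		a, b, c := v[0], v[1], v[2]
		// The two guards below are logically identical; the second one
		// lets the interesting work live in the only branch.
		oldForm := !(a && b && c)
		newForm := !a || !b || !c
		fmt.Println(oldForm == newForm) // true, for every input
	}
}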
@@ -291,13 +291,14 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
 	result := make([]prompb.Label, 0, len(primary)+len(secondary))
 	i, j := 0, 0
 	for i < len(primary) && j < len(secondary) {
-		if primary[i].Name < secondary[j].Name {
+		switch {
+		case primary[i].Name < secondary[j].Name:
 			result = append(result, primary[i])
 			i++
-		} else if primary[i].Name > secondary[j].Name {
+		case primary[i].Name > secondary[j].Name:
 			result = append(result, secondary[j])
 			j++
-		} else {
+		default:
 			result = append(result, primary[i])
 			i++
 			j++
@@ -429,7 +430,8 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
 		return c.series.histograms[n+c.histogramsCur].Timestamp >= t
 	})

-	if c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms) {
+	switch {
+	case c.floatsCur < len(c.series.floats) && c.histogramsCur < len(c.series.histograms):
 		// If float samples and histogram samples have overlapping timestamps prefer the float samples.
 		if c.series.floats[c.floatsCur].Timestamp <= c.series.histograms[c.histogramsCur].Timestamp {
 			c.curValType = chunkenc.ValFloat
@@ -445,9 +447,9 @@ func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
 				c.floatsCur--
 			}
 		}
-	} else if c.floatsCur < len(c.series.floats) {
+	case c.floatsCur < len(c.series.floats):
 		c.curValType = chunkenc.ValFloat
-	} else if c.histogramsCur < len(c.series.histograms) {
+	case c.histogramsCur < len(c.series.histograms):
 		c.curValType = getHistogramValType(&c.series.histograms[c.histogramsCur])
 	}

@@ -515,18 +517,19 @@ func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
 	}
 	c.curValType = chunkenc.ValNone

-	if peekFloatTS < peekHistTS {
+	switch {
+	case peekFloatTS < peekHistTS:
 		c.floatsCur++
 		c.curValType = chunkenc.ValFloat
-	} else if peekHistTS < peekFloatTS {
+	case peekHistTS < peekFloatTS:
 		c.histogramsCur++
 		c.curValType = chunkenc.ValHistogram
-	} else if peekFloatTS == noTS && peekHistTS == noTS {
+	case peekFloatTS == noTS && peekHistTS == noTS:
 		// This only happens when the iterator is exhausted; we set the cursors off the end to prevent
 		// Seek() from returning anything afterwards.
 		c.floatsCur = len(c.series.floats)
 		c.histogramsCur = len(c.series.histograms)
-	} else {
+	default:
 		// Prefer float samples to histogram samples if there's a conflict. We advance the cursor for histograms
 		// anyway otherwise the histogram sample will get selected on the next call to Next().
 		c.floatsCur++
@@ -524,7 +524,7 @@ func TestDecodeWriteRequest(t *testing.T) {
 	require.Equal(t, writeRequestFixture, actual)
 }

-func TestNilHistogramProto(t *testing.T) {
+func TestNilHistogramProto(*testing.T) {
 	// This function will panic if it improperly handles nil
 	// values, causing the test to fail.
 	HistogramProtoToHistogram(prompb.Histogram{})
@@ -55,9 +55,10 @@ func (r *ewmaRate) tick() {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()

-	if r.init {
+	switch {
+	case r.init:
 		r.lastRate += r.alpha * (instantRate - r.lastRate)
-	} else if newEvents > 0 {
+	case newEvents > 0:
 		r.init = true
 		r.lastRate = instantRate
 	}
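The update above is the standard exponentially weighted moving average step, rate += alpha * (instant - rate), seeded with the first observed rate. A hedged standalone sketch, not the ewmaRate type itself:

package main

import "fmt"

// ewma tracks an exponentially weighted moving average, mirroring the
// update rule in ewmaRate.tick.
type ewma struct {
	alpha float64
	rate  float64
	init  bool
}

func (e *ewma) observe(instantRate float64) {
	switch {
	case e.init:
		e.rate += e.alpha * (instantRate - e.rate)
	case instantRate > 0:
		e.init = true
		e.rate = instantRate // Seed with the first nonzero rate.
	}
}

func main() {
	e := &ewma{alpha: 0.3}
	for _, r := range []float64{10, 10, 20, 20} {
		e.observe(r)
	}
	fmt.Printf("%.2f\n", e.rate) // 15.10: drifting toward 20
}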
@@ -1030,9 +1030,10 @@ func (t *QueueManager) calculateDesiredShards() int {
 		return t.numShards
 	}

-	if numShards > t.cfg.MaxShards {
+	switch {
+	case numShards > t.cfg.MaxShards:
 		numShards = t.cfg.MaxShards
-	} else if numShards < t.cfg.MinShards {
+	case numShards < t.cfg.MinShards:
 		numShards = t.cfg.MinShards
 	}
 	return numShards
@@ -1575,10 +1576,11 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
 		}

 		sleepDuration = backoff
-		if backoffErr.retryAfter > 0 {
+		switch {
+		case backoffErr.retryAfter > 0:
 			sleepDuration = backoffErr.retryAfter
 			level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration)
-		} else if backoffErr.retryAfter < 0 {
+		case backoffErr.retryAfter < 0:
 			level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism")
 		}

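The logic above prefers a server-supplied Retry-After duration over the client's own exponential backoff, falling back when the header is absent or lies in the past. A hedged sketch of that selection, detached from the queue manager:

package main

import (
	"fmt"
	"time"
)

// sleepFor picks the wait before the next retry attempt: a positive
// server-provided Retry-After wins; otherwise use our own backoff.
func sleepFor(backoff, retryAfter time.Duration) time.Duration {
	if retryAfter > 0 {
		return retryAfter
	}
	return backoff // retryAfter <= 0: absent or in the past.
}

func main() {
	fmt.Println(sleepFor(2*time.Second, 5*time.Second)) // 5s
	fmt.Println(sleepFor(2*time.Second, 0))             // 2s
	fmt.Println(sleepFor(2*time.Second, -time.Second))  // 2s
}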
@@ -362,7 +362,7 @@ func TestReshard(t *testing.T) {
 	c.waitForExpectedData(t)
 }

-func TestReshardRaceWithStop(t *testing.T) {
+func TestReshardRaceWithStop(*testing.T) {
 	c := NewTestWriteClient()
 	var m *QueueManager
 	h := sync.Mutex{}
@@ -864,10 +864,10 @@ func (c *TestBlockingWriteClient) Endpoint() string {
 // For benchmarking the send and not the receive side.
 type NopWriteClient struct{}

 func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
-func (c *NopWriteClient) Store(_ context.Context, req []byte) error { return nil }
+func (c *NopWriteClient) Store(context.Context, []byte) error { return nil }
 func (c *NopWriteClient) Name() string { return "nopwriteclient" }
 func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }

 func BenchmarkSampleSend(b *testing.B) {
 	// Send one sample per series, which is the typical remote_write case
@@ -294,7 +294,7 @@ func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e
 	return 0, nil
 }

-func (m *mockAppendable) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
 	if t < m.latestHistogram {
 		return 0, storage.ErrOutOfOrderSample
 	}
@@ -732,22 +732,22 @@ func (db *DB) StartTime() (int64, error) {
 }

 // Querier implements the Storage interface.
-func (db *DB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) {
 	return nil, ErrUnsupported
 }

 // ChunkQuerier implements the Storage interface.
-func (db *DB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
 	return nil, ErrUnsupported
 }

 // ExemplarQuerier implements the Storage interface.
-func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
+func (db *DB) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
 	return nil, ErrUnsupported
 }

 // Appender implements storage.Storage.
-func (db *DB) Appender(_ context.Context) storage.Appender {
+func (db *DB) Appender(context.Context) storage.Appender {
 	return db.appenderPool.Get().(storage.Appender)
 }

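A second recurring pattern in the commit is dropping the names of unused parameters, either replacing them with `_` or omitting them entirely; both satisfy revive's unused-parameter check while keeping the method set intact. A minimal sketch of the three equivalent spellings, with illustrative types:

package main

import "fmt"

type handler interface {
	Handle(id int, payload string) error
}

// named keeps the parameter names (flagged by revive when unused).
type named struct{}

func (named) Handle(id int, payload string) error { return nil }

// blank replaces unused names with the blank identifier.
type blank struct{}

func (blank) Handle(_ int, _ string) error { return nil }

// bare omits the names entirely; types alone are legal in Go.
type bare struct{}

func (bare) Handle(int, string) error { return nil }

func main() {
	for _, h := range []handler{named{}, blank{}, bare{}} {
		fmt.Println(h.Handle(1, "x")) // <nil>: all satisfy the interface.
	}
}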
@@ -823,7 +823,7 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64
 		return 0, storage.ErrOutOfOrderSample
 	}

-	// NOTE: always modify pendingSamples and sampleSeries together
+	// NOTE: always modify pendingSamples and sampleSeries together.
 	a.pendingSamples = append(a.pendingSamples, record.RefSample{
 		Ref: series.ref,
 		T:   t,
@@ -849,8 +849,8 @@ func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool
 	return series, true
 }

-func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	// series references and chunk references are identical for agent mode.
+func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+	// Series references and chunk references are identical for agent mode.
 	headRef := chunks.HeadSeriesRef(ref)

 	s := a.series.GetByID(headRef)
@@ -951,7 +951,8 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64
 		return 0, storage.ErrOutOfOrderSample
 	}

-	if h != nil {
+	switch {
+	case h != nil:
 		// NOTE: always modify pendingHistograms and histogramSeries together
 		a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
 			Ref: series.ref,
@@ -959,7 +960,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64
 			H:   h,
 		})
 		a.histogramSeries = append(a.histogramSeries, series)
-	} else if fh != nil {
+	case fh != nil:
 		// NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
 		a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
 			Ref: series.ref,
@@ -973,7 +974,7 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64
 	return storage.SeriesRef(series.ref), nil
 }

-func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
+func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Wire metadata in the Agent's appender.
 	return 0, nil
 }
@@ -107,7 +107,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValFloatHistogram {
+	for it.Next() == ValFloatHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -111,7 +111,7 @@ func TestFloatHistogramChunkSameBuckets(t *testing.T) {

 	// 3. Now recycle an iterator that was never used to access anything.
 	itX := c.Iterator(nil)
-	for itX.Next() == ValFloatHistogram {
+	for itX.Next() == ValFloatHistogram { // nolint:revive
 		// Just iterate through without accessing anything.
 	}
 	it3 := c.iterator(itX)
@@ -126,7 +126,7 @@ func (c *HistogramChunk) Appender() (Appender, error) {
 	// To get an appender, we must know the state it would have if we had
 	// appended all existing data from scratch. We iterate through the end
 	// and populate via the iterator's state.
-	for it.Next() == ValHistogram {
+	for it.Next() == ValHistogram { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -116,7 +116,7 @@ func TestHistogramChunkSameBuckets(t *testing.T) {

 	// 3. Now recycle an iterator that was never used to access anything.
 	itX := c.Iterator(nil)
-	for itX.Next() == ValHistogram {
+	for itX.Next() == ValHistogram { // nolint:revive
 		// Just iterate through without accessing anything.
 	}
 	it3 := c.iterator(itX)
@@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
 	// To get an appender we must know the state it would have if we had
 	// appended all existing data from scratch.
 	// We iterate through the end and populate via the iterator's state.
-	for it.Next() != ValNone {
+	for it.Next() != ValNone { // nolint:revive
 	}
 	if err := it.Err(); err != nil {
 		return nil, err
@@ -152,11 +152,11 @@ type xorAppender struct {
 	trailing uint8
 }

-func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
+func (a *xorAppender) AppendHistogram(int64, *histogram.Histogram) {
 	panic("appended a histogram to an xor chunk")
 }

-func (a *xorAppender) AppendFloatHistogram(t int64, h *histogram.FloatHistogram) {
+func (a *xorAppender) AppendFloatHistogram(int64, *histogram.FloatHistogram) {
 	panic("appended a float histogram to an xor chunk")
 }

@@ -164,14 +164,15 @@ func (a *xorAppender) Append(t int64, v float64) {
 	var tDelta uint64
 	num := binary.BigEndian.Uint16(a.b.bytes())

-	if num == 0 {
+	switch num {
+	case 0:
 		buf := make([]byte, binary.MaxVarintLen64)
 		for _, b := range buf[:binary.PutVarint(buf, t)] {
 			a.b.writeByte(b)
 		}
 		a.b.writeBits(math.Float64bits(v), 64)

-	} else if num == 1 {
+	case 1:
 		tDelta = uint64(t - a.t)

 		buf := make([]byte, binary.MaxVarintLen64)
@@ -181,7 +182,7 @@ func (a *xorAppender) Append(t int64, v float64) {

 		a.writeVDelta(v)

-	} else {
+	default:
 		tDelta = uint64(t - a.t)
 		dod := int64(tDelta - a.tDelta)

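The three arms of this switch are the Gorilla-style timestamp encoding: the first sample stores a full timestamp, the second a delta, and every later sample a delta-of-deltas (dod), which is usually zero for regularly scraped series. A hedged sketch of just that arithmetic, without the bitstream writer:

package main

import "fmt"

// dods computes the delta-of-deltas sequence that the XOR chunk encodes
// after the first two samples. Regular timestamps yield long runs of 0.
func dods(ts []int64) []int64 {
	var out []int64
	var prev, prevDelta int64
	for i, t := range ts {
		switch i {
		case 0:
			// Full timestamp stored.
		case 1:
			prevDelta = t - prev // First delta stored.
		default:
			delta := t - prev
			out = append(out, delta-prevDelta) // Delta-of-deltas.
			prevDelta = delta
		}
		prev = t
	}
	return out
}

func main() {
	fmt.Println(dods([]int64{1000, 2000, 3000, 4001})) // [0 1]
}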
@@ -999,9 +999,10 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error {
 	cdm.readPathMtx.RLock()
 	lastSeq := 0
 	for seg := range cdm.mmappedChunkFiles {
-		if seg >= cerr.FileIndex {
+		switch {
+		case seg >= cerr.FileIndex:
 			segs = append(segs, seg)
-		} else if seg > lastSeq {
+		case seg > lastSeq:
 			lastSeq = seg
 		}
 	}
@@ -503,10 +503,10 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {

 func randomChunk(t *testing.T) chunkenc.Chunk {
 	chunk := chunkenc.NewXORChunk()
-	len := rand.Int() % 120
+	length := rand.Int() % 120
 	app, err := chunk.Appender()
 	require.NoError(t, err)
-	for i := 0; i < len; i++ {
+	for i := 0; i < length; i++ {
 		app.Append(rand.Int63(), rand.Float64())
 	}
 	return chunk
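This rename (and the later `len` to `length` and `append` to `appendSamples` changes) removes shadowing of Go builtins: a local called `len` makes the builtin `len()` unreachable in that scope. A minimal sketch of why the shadowing is a trap:

package main

import "fmt"

func main() {
	s := []int{1, 2, 3}

	length := 2 // Good: the builtin len() still works.
	fmt.Println(length, len(s))

	// If we instead wrote `len := 2`, then within this scope the call
	// len(s) would no longer compile ("cannot call non-function len").
	// Revive's redefines-builtin-id rule flags exactly that.
}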
@@ -467,8 +467,8 @@ func (erringBReader) Size() int64 { return 0 }

 type nopChunkWriter struct{}

-func (nopChunkWriter) WriteChunks(chunks ...chunks.Meta) error { return nil }
+func (nopChunkWriter) WriteChunks(...chunks.Meta) error { return nil }
 func (nopChunkWriter) Close() error { return nil }

 func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sample) {
 	var curr []sample
tsdb/db.go (10 changes)
@@ -963,10 +963,11 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
 	// Create WBL if it was not present and if OOO is enabled with WAL enabled.
 	var wblog *wlog.WL
 	var err error
-	if db.head.wbl != nil {
+	switch {
+	case db.head.wbl != nil:
 		// The existing WBL from the disk might have been replayed while OOO was disabled.
 		wblog = db.head.wbl
-	} else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 {
+	case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0:
 		segmentSize := wlog.DefaultSegmentSize
 		// Wal is set to a custom size.
 		if db.opts.WALSegmentSize > 0 {
@@ -1532,10 +1533,11 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
 		}

 		toDelete := filepath.Join(db.dir, ulid.String())
-		if _, err := os.Stat(toDelete); os.IsNotExist(err) {
+		switch _, err := os.Stat(toDelete); {
+		case os.IsNotExist(err):
 			// Noop.
 			continue
-		} else if err != nil {
+		case err != nil:
 			return errors.Wrapf(err, "stat dir %v", toDelete)
 		}

@@ -1426,11 +1426,11 @@ type mockCompactorFailing struct {
 	max int
 }

-func (*mockCompactorFailing) Plan(dir string) ([]string, error) {
+func (*mockCompactorFailing) Plan(string) ([]string, error) {
 	return nil, nil
 }

-func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) {
+func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) {
 	if len(c.blocks) >= c.max {
 		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
 	}
@@ -1458,7 +1458,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, err
 	return ulid.ULID{}, nil
 }

-func (*mockCompactorFailing) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) {
+func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
 	return nil, fmt.Errorf("mock compaction failing CompactOOO")
 }

@@ -115,17 +115,17 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics {
 // 1GB of extra memory, accounting for the fact that this is heap allocated space.
 // If len <= 0, then the exemplar storage is essentially a noop storage but can later be
 // resized to store exemplars.
-func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage, error) {
-	if len < 0 {
-		len = 0
+func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
+	if length < 0 {
+		length = 0
 	}
 	c := &CircularExemplarStorage{
-		exemplars: make([]*circularBufferEntry, len),
-		index:     make(map[string]*indexEntry, len/estimatedExemplarsPerSeries),
+		exemplars: make([]*circularBufferEntry, length),
+		index:     make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
 		metrics:   m,
 	}

-	c.metrics.maxExemplars.Set(float64(len))
+	c.metrics.maxExemplars.Set(float64(length))

 	return c, nil
 }
@@ -24,4 +24,4 @@ import (
 //
 // The blank import above is actually what invokes the test of this package. If
 // the import succeeds (the code compiles), the test passed.
-func Test(t *testing.T) {}
+func Test(*testing.T) {}
@@ -344,9 +344,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	}

 	if value.IsStaleNaN(v) {
-		if s.lastHistogramValue != nil {
+		switch {
+		case s.lastHistogramValue != nil:
 			return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
-		} else if s.lastFloatHistogramValue != nil {
+		case s.lastFloatHistogramValue != nil:
 			return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
 		}
 	}
@@ -552,9 +553,10 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 		return 0, err
 	}
 	if created {
-		if h != nil {
+		switch {
+		case h != nil:
 			s.lastHistogramValue = &histogram.Histogram{}
-		} else if fh != nil {
+		case fh != nil:
 			s.lastFloatHistogramValue = &histogram.FloatHistogram{}
 		}
 		a.series = append(a.series, record.RefSeries{
@@ -564,7 +566,8 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 		}
 	}

-	if h != nil {
+	switch {
+	case h != nil:
 		s.Lock()
 		if err := s.appendableHistogram(t, h); err != nil {
 			s.Unlock()
@@ -581,7 +584,7 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
 			H:   h,
 		})
 		a.histogramSeries = append(a.histogramSeries, s)
-	} else if fh != nil {
+	case fh != nil:
 		s.Lock()
 		if err := s.appendableFloatHistogram(t, fh); err != nil {
 			s.Unlock()
@@ -938,7 +941,10 @@ func (a *headAppender) Commit() (err error) {

 		var ok, chunkCreated bool

-		if err == nil && oooSample {
+		switch {
+		case err != nil:
+			// Do nothing here.
+		case oooSample:
 			// Sample is OOO and OOO handling is enabled
 			// and the delta is within the OOO tolerance.
 			var mmapRef chunks.ChunkDiskMapperRef
@@ -976,7 +982,7 @@ func (a *headAppender) Commit() (err error) {
 				// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
 				samplesAppended--
 			}
-		} else if err == nil {
+		default:
 			ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange)
 			if ok {
 				if s.T < inOrderMint {
|
||||||
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
|
app.RecodeHistogram(h, pBackwardInserts, nBackwardInserts)
|
||||||
}
|
}
|
||||||
// We have 3 cases here
|
// We have 3 cases here
|
||||||
// - !okToAppend -> We need to cut a new chunk.
|
// - !okToAppend or counterReset -> We need to cut a new chunk.
|
||||||
// - okToAppend but we have inserts → Existing chunk needs
|
// - okToAppend but we have inserts → Existing chunk needs
|
||||||
// recoding before we can append our histogram.
|
// recoding before we can append our histogram.
|
||||||
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
// - okToAppend and no inserts → Chunk is ready to support our histogram.
|
||||||
if !okToAppend || counterReset {
|
switch {
|
||||||
|
case !okToAppend || counterReset:
|
||||||
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
|
||||||
chunkCreated = true
|
chunkCreated = true
|
||||||
} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
|
case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
|
||||||
// New buckets have appeared. We need to recode all
|
// New buckets have appeared. We need to recode all
|
||||||
// prior histogram samples within the chunk before we
|
// prior histogram samples within the chunk before we
|
||||||
// can process this one.
|
// can process this one.
|
||||||
|
@@ -1270,14 +1277,15 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 		app.RecodeHistogramm(fh, pBackwardInserts, nBackwardInserts)
 	}
 	// We have 3 cases here
-	// - !okToAppend -> We need to cut a new chunk.
+	// - !okToAppend or counterReset -> We need to cut a new chunk.
 	// - okToAppend but we have inserts → Existing chunk needs
 	// recoding before we can append our histogram.
 	// - okToAppend and no inserts → Chunk is ready to support our histogram.
-	if !okToAppend || counterReset {
+	switch {
+	case !okToAppend || counterReset:
 		c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
 		chunkCreated = true
-	} else if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
+	case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
 		// New buckets have appeared. We need to recode all
 		// prior histogram samples within the chunk before we
 		// can process this one.
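Both histogram hunks apply the same mechanical rewrite: an `if ... else if ...` chain becomes an expressionless `switch`, so every branch reads at the same depth and no `else` ladder accumulates. A minimal, self-contained Go sketch of the pattern (invented identifiers, not code from this repository):

package main

import "fmt"

// classify mirrors the three-way decision above: cases in a tagless switch
// are evaluated top to bottom and do not fall through, exactly like the
// original if/else-if chain.
func classify(okToAppend, counterReset bool, inserts int) string {
	switch {
	case !okToAppend || counterReset:
		return "cut a new chunk"
	case inserts > 0:
		return "recode the existing chunk"
	default:
		return "append to the current chunk"
	}
}

func main() {
	fmt.Println(classify(false, false, 0)) // cut a new chunk
	fmt.Println(classify(true, false, 3))  // recode the existing chunk
	fmt.Println(classify(true, false, 0))  // append to the current chunk
}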
@@ -424,7 +424,8 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
 			break
 		}
 
-		if chunkRef == meta.OOOLastRef {
+		switch {
+		case chunkRef == meta.OOOLastRef:
 			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
 				meta: chunks.Meta{
 					MinTime: meta.OOOLastMinTime,

@@ -435,7 +436,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
 				origMinT: c.minTime,
 				origMaxT: c.maxTime,
 			})
-		} else if c.OverlapsClosedInterval(mint, maxt) {
+		case c.OverlapsClosedInterval(mint, maxt):
 			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
 				meta: chunks.Meta{
 					MinTime: c.minTime,
@@ -594,12 +595,14 @@ type boundedIterator struct
 func (b boundedIterator) Next() chunkenc.ValueType {
 	for b.Iterator.Next() == chunkenc.ValFloat {
 		t, _ := b.Iterator.At()
-		if t < b.minT {
+		switch {
+		case t < b.minT:
 			continue
-		} else if t > b.maxT {
+		case t > b.maxT:
 			return chunkenc.ValNone
+		default:
+			return chunkenc.ValFloat
 		}
-		return chunkenc.ValFloat
 	}
 	return chunkenc.ValNone
 }
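The `boundedIterator` rewrite also depends on a detail of Go control flow: inside a `switch` case, `continue` targets the enclosing `for` loop and `return` exits the function, so the loop behaviour of the old chain is preserved. A standalone sketch of the same shape over a plain slice (hypothetical stand-in, not repository code):

package main

import "fmt"

// filterInRange skips values below the lower bound, stops at the first value
// above the upper bound, and keeps everything in between, the same shape as
// boundedIterator.Next above.
func filterInRange(vals []int, minV, maxV int) []int {
	var out []int
	for _, v := range vals {
		switch {
		case v < minV:
			continue // continues the for loop, not the switch
		case v > maxV:
			return out // first out-of-range value ends iteration
		default:
			out = append(out, v)
		}
	}
	return out
}

func main() {
	fmt.Println(filterInRange([]int{1, 3, 5, 7, 9}, 3, 7)) // [3 5 7]
}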
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many legitimately empty blocks in this file.
 package tsdb
 
 import (
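The directive added above sits on the line directly before the `package` clause, where golangci-lint applies it to the whole file; the same marker can also annotate a single statement. A minimal sketch of both placements, assuming golangci-lint's `nolint` handling (not code from this commit):

// nolint:revive // File-wide directive, placed immediately above the package clause.
package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	close(ch)
	// Intentionally empty body: the loop exists only to drain the channel,
	// one of the "legitimately empty blocks" the directive refers to.
	for range ch { // nolint:revive
	}
	fmt.Println("drained")
}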
@@ -103,7 +104,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
 			b.Cleanup(func() { require.NoError(b, h.Close()) })
 
 			ts := int64(1000)
-			append := func() error {
+			appendSamples := func() error {
 				var err error
 				app := h.Appender(context.Background())
 				for _, s := range series[:seriesCount] {

@@ -120,13 +121,13 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
 			}
 
 			// Init series, that's not what we're benchmarking here.
-			require.NoError(b, append())
+			require.NoError(b, appendSamples())
 
 			b.ReportAllocs()
 			b.ResetTimer()
 
 			for i := 0; i < b.N; i++ {
-				require.NoError(b, append())
+				require.NoError(b, appendSamples())
 			}
 		})
 }
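The rename in these two hunks (`append` to `appendSamples`) is not cosmetic: a local variable called `append` shadows the builtin of the same name for the rest of its scope, which revive reports as redefining a builtin identifier. A tiny sketch of the fixed form (hypothetical values):

package main

import "fmt"

func main() {
	// Had this closure been named `append`, the builtin would be shadowed
	// for the remainder of main; the rename keeps both usable.
	appendSamples := func(xs []int, v int) []int {
		return append(xs, v) // the builtin append, still visible
	}
	fmt.Println(appendSamples([]int{1, 2}, 3)) // [1 2 3]
}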
@@ -2959,10 +2960,11 @@ func TestAppendHistogram(t *testing.T) {
 			actHistograms := make([]tsdbutil.Sample, 0, len(expHistograms))
 			actFloatHistograms := make([]tsdbutil.Sample, 0, len(expFloatHistograms))
 			for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
-				if typ == chunkenc.ValHistogram {
+				switch typ {
+				case chunkenc.ValHistogram:
 					ts, h := it.AtHistogram()
 					actHistograms = append(actHistograms, sample{t: ts, h: h})
-				} else if typ == chunkenc.ValFloatHistogram {
+				case chunkenc.ValFloatHistogram:
 					ts, fh := it.AtFloatHistogram()
 					actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
 				}
@@ -3564,14 +3566,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
 	for i, eh := range expHistograms {
 		ah := actHistograms[i]
 		if floatHistogram {
-			if value.IsStaleNaN(eh.fh.Sum) {
+			switch {
+			case value.IsStaleNaN(eh.fh.Sum):
 				actNumStale++
 				require.True(t, value.IsStaleNaN(ah.fh.Sum))
 				// To make require.Equal work.
 				ah.fh.Sum = 0
 				eh.fh = eh.fh.Copy()
 				eh.fh.Sum = 0
-			} else if i > 0 {
+			case i > 0:
 				prev := expHistograms[i-1]
 				if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
 					eh.fh.CounterResetHint = histogram.UnknownCounterReset

@@ -3579,14 +3582,15 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
 			}
 			require.Equal(t, eh, ah)
 		} else {
-			if value.IsStaleNaN(eh.h.Sum) {
+			switch {
+			case value.IsStaleNaN(eh.h.Sum):
 				actNumStale++
 				require.True(t, value.IsStaleNaN(ah.h.Sum))
 				// To make require.Equal work.
 				ah.h.Sum = 0
 				eh.h = eh.h.Copy()
 				eh.h.Sum = 0
-			} else if i > 0 {
+			case i > 0:
 				prev := expHistograms[i-1]
 				if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
 					eh.h.CounterResetHint = histogram.UnknownCounterReset
@@ -4487,19 +4491,19 @@ func TestHistogramValidation(t *testing.T) {
 
 	for testName, tc := range tests {
 		t.Run(testName, func(t *testing.T) {
-			err := ValidateHistogram(tc.h)
-			if tc.errMsg != "" {
+			switch err := ValidateHistogram(tc.h); {
+			case tc.errMsg != "":
 				require.ErrorContains(t, err, tc.errMsg)
-			} else {
+			default:
 				require.NoError(t, err)
 			}
 
-			err = ValidateFloatHistogram(tc.h.ToFloat())
-			if tc.errMsgFloat != "" {
+			switch err := ValidateFloatHistogram(tc.h.ToFloat()); {
+			case tc.errMsgFloat != "":
 				require.ErrorContains(t, err, tc.errMsgFloat)
-			} else if tc.errMsg != "" {
+			case tc.errMsg != "":
 				require.ErrorContains(t, err, tc.errMsg)
-			} else {
+			default:
 				require.NoError(t, err)
 			}
 		})
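This hunk also shows `switch` with an init statement: `switch err := ValidateHistogram(tc.h); {` scopes `err` to the switch, so the second validation can declare its own `err` with `:=` rather than reassigning a shared variable. A runnable sketch of that scoping (invented `validate` helper):

package main

import (
	"errors"
	"fmt"
)

func validate(n int) error {
	if n < 0 {
		return errors.New("negative")
	}
	return nil
}

func main() {
	// Each switch declares its own err; neither leaks into the other's scope.
	switch err := validate(-1); {
	case err != nil:
		fmt.Println("first:", err) // first: negative
	default:
		fmt.Println("first: ok")
	}
	switch err := validate(1); {
	case err != nil:
		fmt.Println("second:", err)
	default:
		fmt.Println("second: ok") // second: ok
	}
}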
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many legitimately empty blocks in this file.
 package tsdb
 
 import (
@@ -565,12 +565,11 @@ func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
 
 	for _, it := range p {
 		// NOTE: mergedPostings struct requires the user to issue an initial Next.
-		if it.Next() {
+		switch {
+		case it.Next():
 			ph = append(ph, it)
-		} else {
-			if it.Err() != nil {
-				return &mergedPostings{err: it.Err()}, true
-			}
+		case it.Err() != nil:
+			return &mergedPostings{err: it.Err()}, true
 		}
 	}
 
@@ -704,16 +703,16 @@ func (rp *removedPostings) Next() bool {
 			return true
 		}
 
-		fcur, rcur := rp.full.At(), rp.remove.At()
-		if fcur < rcur {
+		switch fcur, rcur := rp.full.At(), rp.remove.At(); {
+		case fcur < rcur:
 			rp.cur = fcur
 			rp.fok = rp.full.Next()
 
 			return true
-		} else if rcur < fcur {
+		case rcur < fcur:
 			// Forward the remove postings to the right position.
 			rp.rok = rp.remove.Seek(fcur)
-		} else {
+		default:
 			// Skip the current posting.
 			rp.fok = rp.full.Next()
 		}
@@ -848,9 +847,10 @@ func (it *bigEndianPostings) Err() error {
 func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
 	h := make(postingsWithIndexHeap, 0, len(candidates))
 	for idx, it := range candidates {
-		if it.Next() {
+		switch {
+		case it.Next():
 			h = append(h, postingsWithIndex{index: idx, p: it})
-		} else if it.Err() != nil {
+		case it.Err() != nil:
 			return nil, it.Err()
 		}
 	}
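The `case it.Next(): ... case it.Err() != nil:` shape above relies on ordered case evaluation: `Next()` runs first and advances the iterator; `Err()` is consulted only when `Next()` returned false, distinguishing exhaustion from failure. A self-contained sketch with a toy iterator (not the real Postings interface):

package main

import "fmt"

// iter follows the Next/Err contract used above: Next reports whether a
// value is available, and Err explains a false Next.
type iter struct {
	vals []int
	pos  int
	err  error
}

func (it *iter) Next() bool { it.pos++; return it.err == nil && it.pos <= len(it.vals) }
func (it *iter) Err() error { return it.err }

func main() {
	candidates := []*iter{
		{vals: []int{1}},          // yields one value
		{},                        // exhausted immediately, no error
		{err: fmt.Errorf("boom")}, // failed iterator
	}
	var live []*iter
	for _, it := range candidates {
		switch {
		case it.Next(): // advances as a side effect
			live = append(live, it)
		case it.Err() != nil: // only reached when Next returned false
			fmt.Println("dropping iterator:", it.Err())
		}
	}
	fmt.Println("live iterators:", len(live)) // live iterators: 1
}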
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb
 
 import (
@@ -122,7 +123,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 		}
 	}
 
-	// There is nothing to do if we did not collect any chunk
+	// There is nothing to do if we did not collect any chunk.
 	if len(tmpChks) == 0 {
 		return nil
 	}
@@ -135,14 +136,15 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 	// chunks Meta the first chunk that overlaps with others.
 	// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
 	// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
-	// to return chunk Metas for chunk 5 and chunk 6
+	// to return chunk Metas for chunk 5 and chunk 6e
 	*chks = append(*chks, tmpChks[0])
-	maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk"
+	maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
 	for _, c := range tmpChks[1:] {
-		if c.MinTime > maxTime {
+		switch {
+		case c.MinTime > maxTime:
 			*chks = append(*chks, c)
 			maxTime = c.MaxTime
-		} else if c.MaxTime > maxTime {
+		case c.MaxTime > maxTime:
 			maxTime = c.MaxTime
 			(*chks)[len(*chks)-1].MaxTime = c.MaxTime
 		}
@@ -239,18 +239,20 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
 	}
 
 	for _, m := range ms {
-		if m.Name == "" && m.Value == "" { // Special-case for AllPostings, used in tests at least.
+		switch {
+		case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least.
 			k, v := index.AllPostingsKey()
 			allPostings, err := ix.Postings(k, v)
 			if err != nil {
 				return nil, err
 			}
 			its = append(its, allPostings)
-		} else if labelMustBeSet[m.Name] {
+		case labelMustBeSet[m.Name]:
 			// If this matcher must be non-empty, we can be smarter.
 			matchesEmpty := m.Matches("")
 			isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
-			if isNot && matchesEmpty { // l!="foo"
+			switch {
+			case isNot && matchesEmpty: // l!="foo"
 				// If the label can't be empty and is a Not and the inner matcher
 				// doesn't match empty, then subtract it out at the end.
 				inverse, err := m.Inverse()

@@ -263,7 +265,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
 					return nil, err
 				}
 				notIts = append(notIts, it)
-			} else if isNot && !matchesEmpty { // l!=""
+			case isNot && !matchesEmpty: // l!=""
 				// If the label can't be empty and is a Not, but the inner matcher can
 				// be empty we need to use inversePostingsForMatcher.
 				inverse, err := m.Inverse()

@@ -279,7 +281,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
 					return index.EmptyPostings(), nil
 				}
 				its = append(its, it)
-			} else { // l="a"
+			default: // l="a"
 				// Non-Not matcher, use normal postingsForMatcher.
 				it, err := postingsForMatcher(ix, m)
 				if err != nil {

@@ -290,7 +292,7 @@ func PostingsForMatchers(ix IndexReader, ms ...*labels.Matcher) (index.Postings,
 				}
 				its = append(its, it)
 			}
-		} else { // l=""
+		default: // l=""
 			// If the matchers for a labelname selects an empty value, it selects all
 			// the series which don't have the label name set too. See:
 			// https://github.com/prometheus/prometheus/issues/3575 and
@@ -966,23 +968,24 @@ func (m *mergedStringIter) Next() bool {
 		return false
 	}
 
-	if !m.aok {
+	switch {
+	case !m.aok:
 		m.cur = m.b.At()
 		m.bok = m.b.Next()
 		m.err = m.b.Err()
-	} else if !m.bok {
+	case !m.bok:
 		m.cur = m.a.At()
 		m.aok = m.a.Next()
 		m.err = m.a.Err()
-	} else if m.b.At() > m.a.At() {
+	case m.b.At() > m.a.At():
 		m.cur = m.a.At()
 		m.aok = m.a.Next()
 		m.err = m.a.Err()
-	} else if m.a.At() > m.b.At() {
+	case m.a.At() > m.b.At():
 		m.cur = m.b.At()
 		m.bok = m.b.Next()
 		m.err = m.b.Err()
-	} else { // Equal.
+	default: // Equal.
 		m.cur = m.b.At()
 		m.aok = m.a.Next()
 		m.err = m.a.Err()
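The `mergedStringIter` cases above implement a classic two-way merge of sorted, deduplicated streams: advance whichever side holds the smaller value, and advance both when the values are equal. The same decision table as a runnable sketch over slices (illustrative only):

package main

import "fmt"

// mergeSorted merges two sorted, deduplicated slices, collapsing values that
// appear on both sides, the slice analogue of mergedStringIter.Next.
func mergeSorted(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) || j < len(b) {
		switch {
		case i == len(a): // a exhausted
			out = append(out, b[j])
			j++
		case j == len(b): // b exhausted
			out = append(out, a[i])
			i++
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case b[j] < a[i]:
			out = append(out, b[j])
			j++
		default: // equal: emit once, advance both
			out = append(out, a[i])
			i++
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(mergeSorted([]string{"a", "c", "d"}, []string{"b", "c", "e"}))
	// [a b c d e]
}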
@@ -1085,7 +1088,7 @@ func newNopChunkReader() ChunkReader {
 	}
 }
 
-func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) {
 	return cr.emptyChunk, nil
 }
@@ -250,7 +250,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
 		b.ResetTimer()
 		for i := 0; i < b.N; i++ {
 			ss := q.Select(sorted, nil, matcher)
-			for ss.Next() {
+			for ss.Next() { // nolint:revive
 			}
 			require.NoError(b, ss.Err())
 		}
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb
 
 import (
@@ -190,9 +190,10 @@ type Stone struct {
 
 func ReadTombstones(dir string) (Reader, int64, error) {
 	b, err := os.ReadFile(filepath.Join(dir, TombstonesFilename))
-	if os.IsNotExist(err) {
+	switch {
+	case os.IsNotExist(err):
 		return NewMemTombstones(), 0, nil
-	} else if err != nil {
+	case err != nil:
 		return nil, 0, err
 	}
 
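`ReadTombstones` separates "file is absent" (a normal, recoverable condition) from "read failed" (a genuine error) in the first two cases. A minimal sketch of that classification pattern (hypothetical path, not repository code):

package main

import (
	"fmt"
	"os"
)

// readOptional treats a missing file as an empty result and anything else
// that goes wrong as a real error, mirroring ReadTombstones above.
func readOptional(path string) ([]byte, error) {
	b, err := os.ReadFile(path)
	switch {
	case os.IsNotExist(err):
		return nil, nil // absent: fine, return empty
	case err != nil:
		return nil, err // any other failure is fatal
	}
	return b, nil
}

func main() {
	b, err := readOptional("/nonexistent/by/assumption")
	fmt.Println(len(b), err) // 0 <nil>
}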
tsdb/wal.go (21 changed lines)
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unsued function arguments in this file by design.
 package tsdb
 
 import (
@@ -521,9 +522,10 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
 		}
 	}()
 
-	if n, err := f.Read(metab); err != nil {
+	switch n, err := f.Read(metab); {
+	case err != nil:
 		return nil, errors.Wrapf(err, "validate meta %q", f.Name())
-	} else if n != 8 {
+	case n != 8:
 		return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
 	}
 
@@ -1062,9 +1064,10 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
 	tr := io.TeeReader(cr, r.crc32)
 
 	b := make([]byte, 6)
-	if n, err := tr.Read(b); err != nil {
+	switch n, err := tr.Read(b); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != 6 {
+	case n != 6:
 		return 0, 0, nil, r.corruptionErr("invalid entry header size %d", n)
 	}
 

@@ -1086,15 +1089,17 @@ func (r *walReader) entry(cr io.Reader) (WALEntryType, byte, []byte, error) {
 	}
 	buf := r.buf[:length]
 
-	if n, err := tr.Read(buf); err != nil {
+	switch n, err := tr.Read(buf); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != length {
+	case n != length:
 		return 0, 0, nil, r.corruptionErr("invalid entry body size %d", n)
 	}
 
-	if n, err := cr.Read(b[:4]); err != nil {
+	switch n, err := cr.Read(b[:4]); {
+	case err != nil:
 		return 0, 0, nil, err
-	} else if n != 4 {
+	case n != 4:
 		return 0, 0, nil, r.corruptionErr("invalid checksum length %d", n)
 	}
 	if exp, has := binary.BigEndian.Uint32(b[:4]), r.crc32.Sum32(); has != exp {
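The WAL hunks fold the read into the switch's init statement and classify the resulting `(n, err)` pair: hard error first, then a short read. A runnable sketch with an in-memory reader (illustrative only; a generic `io.Reader` may legitimately return short reads that callers retry, which is why the WAL code treats them as corruption only within its own format):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	r := bytes.NewReader([]byte{0xBE, 0xEF}) // only 2 bytes available
	header := make([]byte, 6)
	// The init statement performs the read; the cases classify the result.
	switch n, err := r.Read(header); {
	case err != nil:
		fmt.Println("read failed:", err)
	case n != len(header):
		fmt.Println("short read:", n, "bytes") // short read: 2 bytes
	default:
		fmt.Println("got full header")
	}
}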
@@ -126,9 +126,10 @@ func (r *LiveReader) Next() bool {
 		// we return EOF and the user can try again later. If we have a full
 		// page, buildRecord is guaranteed to return a record or a non-EOF; it
 		// has checks the records fit in pages.
-		if ok, err := r.buildRecord(); ok {
+		switch ok, err := r.buildRecord(); {
+		case ok:
 			return true
-		} else if err != nil && err != io.EOF {
+		case err != nil && err != io.EOF:
 			r.err = err
 			return false
 		}
@@ -533,7 +533,7 @@ func TestReaderData(t *testing.T) {
 			require.NoError(t, err)
 
 			reader := fn(sr)
-			for reader.Next() {
+			for reader.Next() { // nolint:revive
 			}
 			require.NoError(t, reader.Err())
 
@@ -405,9 +405,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 
 			// Ignore errors reading to end of segment whilst replaying the WAL.
 			if !tail {
-				if err != nil && errors.Cause(err) != io.EOF {
+				switch {
+				case err != nil && errors.Cause(err) != io.EOF:
 					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
-				} else if reader.Offset() != size {
+				case reader.Offset() != size:
 					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
 				}
 				return nil

@@ -425,9 +426,10 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 
 			// Ignore all errors reading to end of segment whilst replaying the WAL.
 			if !tail {
-				if err != nil && errors.Cause(err) != io.EOF {
+				switch {
+				case err != nil && errors.Cause(err) != io.EOF:
 					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
-				} else if reader.Offset() != size {
+				case reader.Offset() != size:
 					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
 				}
 				return nil
@@ -164,7 +164,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
 			sr := NewSegmentBufReader(s)
 			require.NoError(t, err)
 			r := NewReader(sr)
-			for r.Next() {
+			for r.Next() { // nolint:revive
 			}
 
 			// Close the segment so we don't break things on Windows.
@@ -22,7 +22,7 @@ import (
 
 type counter int
 
-func (c *counter) Log(keyvals ...interface{}) error {
+func (c *counter) Log(...interface{}) error {
 	(*c)++
 	return nil
 }
@@ -37,6 +37,6 @@ func (c *MockContext) Err() error {
 }
 
 // Value ignores the Value and always returns nil
-func (c *MockContext) Value(key interface{}) interface{} {
+func (c *MockContext) Value(interface{}) interface{} {
 	return nil
 }
@@ -22,7 +22,7 @@ type roundTrip struct {
 	theError    error
 }
 
-func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
+func (rt *roundTrip) RoundTrip(*http.Request) (*http.Response, error) {
 	return rt.theResponse, rt.theError
 }
@@ -116,7 +116,7 @@ func (tc *ZookeeperTreeCache) Stop() {
 	tc.stop <- struct{}{}
 	go func() {
 		// Drain tc.head.events so that go routines can make progress and exit.
-		for range tc.head.events {
+		for range tc.head.events { // nolint:revive
 		}
 	}()
 	go func() {

@@ -176,11 +176,11 @@ func (tc *ZookeeperTreeCache) loop(path string) {
 					node = childNode
 				}
 
-				err := tc.recursiveNodeUpdate(ev.Path, node)
-				if err != nil {
+				switch err := tc.recursiveNodeUpdate(ev.Path, node); {
+				case err != nil:
 					level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", err)
 					failure()
-				} else if tc.head.data == nil {
+				case tc.head.data == nil:
 					level.Error(tc.logger).Log("msg", "Error during processing of Zookeeper event", "err", "path no longer exists", "path", tc.prefix)
 					failure()
 				}

@@ -214,13 +214,14 @@ func (tc *ZookeeperTreeCache) loop(path string) {
 
 func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error {
 	data, _, dataWatcher, err := tc.conn.GetW(path)
-	if errors.Is(err, zk.ErrNoNode) {
+	switch {
+	case errors.Is(err, zk.ErrNoNode):
 		tc.recursiveDelete(path, node)
 		if node == tc.head {
 			return fmt.Errorf("path %s does not exist", path)
 		}
 		return nil
-	} else if err != nil {
+	case err != nil:
 		return err
 	}

@@ -230,10 +231,11 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
 	}
 
 	children, _, childWatcher, err := tc.conn.ChildrenW(path)
-	if errors.Is(err, zk.ErrNoNode) {
+	switch {
+	case errors.Is(err, zk.ErrNoNode):
 		tc.recursiveDelete(path, node)
 		return nil
-	} else if err != nil {
+	case err != nil:
 		return err
 	}
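The treecache hunks keep comparing against the `zk.ErrNoNode` sentinel via `errors.Is`, which still matches if the sentinel ever arrives wrapped with additional context, unlike a plain `==` comparison. A sketch of the same classification using a standard-library sentinel in place of the zk one:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/no/such/path")
	// errors.Is unwraps the *PathError returned by os.Open, so the sentinel
	// comparison works even though err is not fs.ErrNotExist itself.
	switch {
	case errors.Is(err, fs.ErrNotExist):
		fmt.Println("expected sentinel: not exist")
	case err != nil:
		fmt.Println("unexpected error:", err)
	default:
		fmt.Println("opened")
	}
}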
@@ -117,7 +117,7 @@ type RulesRetriever interface {
 
 type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
 
-func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
+func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
 	if param != "" {
 		return stats.NewQueryStats(s)
 	}

@@ -392,7 +392,7 @@ func invalidParamError(err error, parameter string) apiFuncResult {
 	}, nil, nil}
 }
 
-func (api *API) options(r *http.Request) apiFuncResult {
+func (api *API) options(*http.Request) apiFuncResult {
 	return apiFuncResult{nil, nil, nil, nil}
 }

@@ -989,12 +989,14 @@ func (api *API) targets(r *http.Request) apiFuncResult {
 				ScrapeURL: target.URL().String(),
 				GlobalURL: globalURL.String(),
 				LastError: func() string {
-					if err == nil && lastErrStr == "" {
+					switch {
+					case err == nil && lastErrStr == "":
 						return ""
-					} else if err != nil {
+					case err != nil:
 						return errors.Wrapf(err, lastErrStr).Error()
+					default:
+						return lastErrStr
 					}
-					return lastErrStr
 				}(),
 				LastScrape:         target.LastScrape(),
 				LastScrapeDuration: target.LastScrapeDuration().Seconds(),

@@ -1565,7 +1567,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult {
 	}{name}, nil, nil, nil}
 }
 
-func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
+func (api *API) cleanTombstones(*http.Request) apiFuncResult {
 	if !api.enableAdmin {
 		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
 	}

@@ -1764,7 +1766,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }
 
-func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }

@@ -1817,7 +1819,7 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }
 
-func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalSampleJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }

@@ -1841,7 +1843,7 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteArrayEnd()
 }
 
-func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
+func marshalPointJSONIsEmpty(unsafe.Pointer) bool {
 	return false
 }

@@ -1878,6 +1880,6 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }
 
-func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
+func marshalExemplarJSONEmpty(unsafe.Pointer) bool {
 	return false
 }

@@ -2560,9 +2560,9 @@ type fakeDB struct {
 	err error
 }
 
 func (f *fakeDB) CleanTombstones() error { return f.err }
-func (f *fakeDB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { return f.err }
-func (f *fakeDB) Snapshot(dir string, withHead bool) error { return f.err }
+func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
+func (f *fakeDB) Snapshot(string, bool) error { return f.err }
 func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
 	dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
 	if err != nil {
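Most of the api.go hunks silence revive's unused-parameter warning in one of two equivalent ways: binding the argument to the blank identifier (`_ context.Context`) or dropping the parameter name entirely (`options(*http.Request)`). Neither changes the function's type. A compact sketch (invented handler type):

package main

import "fmt"

type handlerFunc func(id int, verbose bool) string

// withBlank discards one argument via the blank identifier; unnamed drops
// parameter names altogether. Both still satisfy handlerFunc, because
// parameter names are not part of a Go function type.
func withBlank(_ int, verbose bool) string {
	if verbose {
		return "verbose"
	}
	return "quiet"
}

func unnamed(int, bool) string { return "fixed" }

func main() {
	var h handlerFunc = withBlank
	fmt.Println(h(42, true)) // verbose
	h = unnamed
	fmt.Println(h(42, true)) // fixed
}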
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// nolint:revive // Many unsued function arguments in this file by design.
 package v1
 
 import (
@@ -388,13 +388,13 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 			break
 		}
 		require.NoError(t, err)
-		if et == textparse.EntryHelp {
-			metricFamilies++
-		}
 		if et == textparse.EntryHistogram || et == textparse.EntrySeries {
 			p.Metric(&l)
 		}
-		if et == textparse.EntryHistogram {
+		switch et {
+		case textparse.EntryHelp:
+			metricFamilies++
+		case textparse.EntryHistogram:
 			_, parsedTimestamp, h, fh := p.Histogram()
 			require.Nil(t, h)
 			actVec = append(actVec, promql.Sample{

@@ -402,7 +402,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 				H:      fh,
 				Metric: l,
 			})
-		} else if et == textparse.EntrySeries {
+		case textparse.EntrySeries:
 			_, parsedTimestamp, f := p.Series()
 			actVec = append(actVec, promql.Sample{
 				T: *parsedTimestamp,
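Unlike the expressionless switches elsewhere in this commit, the federation test switches on a value (`switch et {`), replacing a run of `if et == ...` comparisons with a tagged switch. A standalone sketch with an invented entry type (not the textparse one):

package main

import "fmt"

type entry int

const (
	entryHelp entry = iota
	entrySeries
	entryHistogram
)

// describe compares et against each case value in turn; the first match wins
// and there is no implicit fallthrough.
func describe(et entry) string {
	switch et {
	case entryHelp:
		return "help"
	case entryHistogram:
		return "histogram"
	case entrySeries:
		return "series"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(describe(entryHistogram)) // histogram
}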
@@ -755,14 +755,14 @@ func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
 	return math.NaN()
 }
 
-func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) version(w http.ResponseWriter, _ *http.Request) {
 	dec := json.NewEncoder(w)
 	if err := dec.Encode(h.versionInfo); err != nil {
 		http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
 	}
 }
 
-func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) quit(w http.ResponseWriter, _ *http.Request) {
 	var closed bool
 	h.quitOnce.Do(func() {
 		closed = true

@@ -774,7 +774,7 @@ func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) reload(w http.ResponseWriter, _ *http.Request) {
 	rc := make(chan error)
 	h.reloadCh <- rc
 	if err := <-rc; err != nil {