// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
	"bufio"
	"context"
	"encoding/binary"
	"flag"
	"fmt"
	"hash/crc32"
	"math"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"go.uber.org/goleak"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wlog"
	"github.com/prometheus/prometheus/util/annotations"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestMain(m *testing.M) {
	var isolationEnabled bool
	flag.BoolVar(&isolationEnabled, "test.tsdb-isolation", true, "enable isolation")
	flag.Parse()
	defaultIsolationDisabled = !isolationEnabled

	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
}
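
// The flag registered above lets the whole package run with isolation on or
// off. A usage sketch (the package path is assumed here):
//
//	go test ./tsdb -test.tsdb-isolation=false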

func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
	tmpdir := t.TempDir()
	var err error

	if opts == nil {
		opts = DefaultOptions()
	}
	opts.EnableNativeHistograms = true

	if len(rngs) == 0 {
		db, err = Open(tmpdir, nil, nil, opts, nil)
	} else {
		opts, rngs = validateOpts(opts, rngs)
		db, err = open(tmpdir, nil, nil, opts, rngs, nil)
	}
	require.NoError(t, err)

	// Do not Close() the test database by default as it will deadlock on test failures.
	return db
}
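
// A typical call site for openTestDB (a minimal sketch; because the helper
// deliberately does not Close() the DB, tests that can close safely are
// expected to do so themselves):
//
//	db := openTestDB(t, nil, nil)
//	defer func() {
//		require.NoError(t, db.Close())
//	}()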

// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]chunks.Sample {
	ss := q.Select(context.Background(), false, nil, matchers...)
	defer func() {
		require.NoError(t, q.Close())
	}()

	var it chunkenc.Iterator
	result := map[string][]chunks.Sample{}
	for ss.Next() {
		series := ss.At()

		samples := []chunks.Sample{}
		it = series.Iterator(it)
		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
			switch typ {
			case chunkenc.ValFloat:
				ts, v := it.At()
				samples = append(samples, sample{t: ts, f: v})
			case chunkenc.ValHistogram:
				ts, h := it.AtHistogram(nil)
				samples = append(samples, sample{t: ts, h: h})
case chunkenc . ValFloatHistogram :
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher, while separate matchers are ORed.
Includes unit tests for "promtool tsdb dump".
Also refactors some matcher scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
https://github.com/prometheus/prometheus/pull/13393 added the
"EnableOverlappingCompaction" parameter to the compactor code but not to
tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package was added to the standard library in Go 1.21;
we need to import it from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
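To make "naturally" concrete: digit runs are compared by numeric value, so "pod2" sorts before "pod10", which plain byte-wise ordering gets wrong. A self-contained comparator sketch of the idea (illustrative only; not the library code the PR uses):

package main

import (
    "fmt"
    "sort"
)

// naturalLess is an illustrative comparator: runs of digits are ordered by
// their numeric value, e.g. "pod2" < "pod10".
func naturalLess(a, b string) bool {
    i, j := 0, 0
    for i < len(a) && j < len(b) {
        if isDigit(a[i]) && isDigit(b[j]) {
            // Extract the full digit runs and compare them numerically.
            ai, bj := i, j
            for ai < len(a) && isDigit(a[ai]) {
                ai++
            }
            for bj < len(b) && isDigit(b[bj]) {
                bj++
            }
            na, nb := trimZeros(a[i:ai]), trimZeros(b[j:bj])
            if len(na) != len(nb) {
                return len(na) < len(nb) // shorter digit run = smaller number
            }
            if na != nb {
                return na < nb // same length: lexicographic = numeric
            }
            i, j = ai, bj
            continue
        }
        if a[i] != b[j] {
            return a[i] < b[j]
        }
        i++
        j++
    }
    return len(a)-i < len(b)-j // the shorter string sorts first
}

func isDigit(c byte) bool { return '0' <= c && c <= '9' }

func trimZeros(s string) string {
    for len(s) > 1 && s[0] == '0' {
        s = s[1:]
    }
    return s
}

func main() {
    vals := []string{"pod10", "pod2", "pod1"}
    sort.Slice(vals, func(i, j int) bool { return naturalLess(vals[i], vals[j]) })
    fmt.Println(vals) // [pod1 pod2 pod10]
}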
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit tests to improve readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
And if there are no values at all, there is no need to run any logic.
Also adds more labelValuesWithMatchers benchmarks.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
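A sketch of the shortcut described above, assuming only the public labels.Matcher API (the head/index internals are simplified away):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/labels"
)

// labelValuesFast sketches the shortcut: when every matcher targets the
// requested label, the matchers can be applied directly to the candidate
// values, skipping postings entirely; and with no values there is nothing
// to do at all.
func labelValuesFast(name string, values []string, matchers []*labels.Matcher) ([]string, bool) {
    for _, m := range matchers {
        if m.Name != name {
            return nil, false // fall back to the postings-based path
        }
    }
    if len(values) == 0 {
        return nil, true // no values at all: no logic to run
    }
    var out []string
    for _, v := range values {
        ok := true
        for _, m := range matchers {
            if !m.Matches(v) {
                ok = false
                break
            }
        }
        if ok {
            out = append(out, v)
        }
    }
    return out, true
}

func main() {
    m := labels.MustNewMatcher(labels.MatchRegexp, "job", "api.*")
    vals, _ := labelValuesFast("job", []string{"api-1", "db-1"}, []*labels.Matcher{m})
    fmt.Println(vals) // [api-1]
}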
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
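The mechanism underneath is Go 1.19's soft memory limit. A minimal sketch of the idea, where the detected limit and the reserved ratio are assumed inputs rather than the actual cgroup-detection code:

package main

import (
    "fmt"
    "runtime/debug"
)

// applyMemLimit sets GOMEMLIMIT to a fraction of the detected container
// memory limit, holding back `reserved` (e.g. 0.1 = 10%) for non-Go memory.
func applyMemLimit(detectedBytes int64, reserved float64) int64 {
    limit := int64(float64(detectedBytes) * (1 - reserved))
    debug.SetMemoryLimit(limit) // Go 1.19+ soft memory limit
    return limit
}

func main() {
    // Pretend the cgroup reported a 2 GiB limit.
    applied := applyMemLimit(2<<30, 0.1)
    fmt.Printf("GOMEMLIMIT set to %d bytes\n", applied)
}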
* Update OSSF badge link (#13433)
Provide a more user-friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
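In iterator terms, the reuse looks like the sketch below, using the AtFloatHistogram signature introduced by this change; the aggregation itself is illustrative:

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
)

// sumHistogramCounts shows the reuse pattern: one FloatHistogram object is
// recycled across all AtFloatHistogram calls instead of allocating per sample.
func sumHistogramCounts(it chunkenc.Iterator) float64 {
    var (
        fh  *histogram.FloatHistogram
        sum float64
    )
    for it.Next() == chunkenc.ValFloatHistogram {
        // Passing fh lets the iterator populate the existing object;
        // passing nil (as the test helpers below do) allocates a new one.
        _, fh = it.AtFloatHistogram(fh)
        sum += fh.Count
    }
    return sum
}

func main() {
    fmt.Println(sumHistogramCounts(chunkenc.NewNopIterator()))
}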
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add a test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclose some implicit sub-tests in a
`t.Run` call to make them explicit sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
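The `t.Run` refactor mentioned above follows the standard table-driven idiom; a generic sketch (not the actual test body):

package example

import "testing"

func TestExplicitSubTests(t *testing.T) {
    cases := []struct{ name, got, want string }{
        {name: "simple", got: "a", want: "a"},
    }
    for _, tc := range cases {
        // Wrapping each case in t.Run makes it an explicit sub-test:
        // failures are reported per case and a single case can be run
        // with -run 'TestExplicitSubTests/simple'.
        t.Run(tc.name, func(t *testing.T) {
            if tc.got != tc.want {
                t.Fatalf("got %q, want %q", tc.got, tc.want)
            }
        })
    }
}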
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
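Under the slice-based Labels representation, the common case is just a re-slice; a sketch with a stand-in Label type (the stringlabels variant instead re-points into the shared string):

package main

import "fmt"

// Label is a stand-in for labels.Label.
type Label struct{ Name, Value string }

// dropMetricName assumes labels are sorted by name, so in the common case
// "__name__" is the first element; dropping it is then a zero-copy re-slice.
func dropMetricName(lbls []Label) []Label {
    if len(lbls) > 0 && lbls[0].Name == "__name__" {
        return lbls[1:]
    }
    return lbls
}

func main() {
    lbls := []Label{{"__name__", "http_requests_total"}, {"job", "api"}}
    fmt.Println(dropMetricName(lbls)) // [{job api}]
}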
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code: `seriesHashmap.del` can use
the unique series reference and doesn't need to check Labels.
Also streamline the logic for deleting from the `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
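A sketch of delete-by-reference over the `unique` and `conflicts` maps, with simplified stand-in types (the real maps hold series objects under the head's stripe locking):

package main

type series struct{ ref uint64 }

type seriesHashmap struct {
    unique    map[uint64]*series   // hash -> the single series with that hash
    conflicts map[uint64][]*series // hash -> further series that collided
}

// del removes the series identified by its unique reference; no label
// comparison is needed because refs are never reused for another series.
func (m *seriesHashmap) del(hash, ref uint64) {
    if s, ok := m.unique[hash]; ok && s.ref == ref {
        // Promote one conflicting series into the unique slot, if any.
        if others := m.conflicts[hash]; len(others) > 0 {
            m.unique[hash] = others[0]
            if len(others) == 1 {
                delete(m.conflicts, hash)
            } else {
                m.conflicts[hash] = others[1:]
            }
        } else {
            delete(m.unique, hash)
        }
        return
    }
    // Otherwise remove it from the conflicts list by reference.
    others := m.conflicts[hash]
    for i, s := range others {
        if s.ref == ref {
            others = append(others[:i], others[i+1:]...)
            break
        }
    }
    if len(others) == 0 {
        delete(m.conflicts, hash)
    } else {
        m.conflicts[hash] = others
    }
}

func main() {}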
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing an upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, to the point where I was merely
rubber-stamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which is a hotspot, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
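The size claim is easy to check on a 64-bit platform:

package main

import (
    "fmt"
    "sync"
    "unsafe"
)

func main() {
    // On 64-bit platforms this prints 8 and 24: RWMutex embeds a Mutex plus
    // reader bookkeeping, which is wasted space when RLock is almost unused.
    fmt.Println(unsafe.Sizeof(sync.Mutex{}), unsafe.Sizeof(sync.RWMutex{}))
}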
* Fix last_over_time for native histograms
The last_over_time function retains a histogram sample without making a copy.
This sample now comes from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time, and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
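A sketch of the proposed copy, relying only on FloatHistogram.Copy (where exactly this hooks into the PromQL engine is elided):

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/model/histogram"
)

// lastHistogramSample sketches the proposed fix: copy the histogram before
// retaining it, so the buffered iterator can safely reuse its internal object.
func lastHistogramSample(t int64, fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
    if fh != nil {
        fh = fh.Copy() // deep copy: spans and buckets are duplicated
    }
    return t, fh
}

func main() {
    ts, fh := lastHistogramSample(42, &histogram.FloatHistogram{Count: 1})
    fmt.Println(ts, fh.Count)
}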
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update the concurrency controller once the rule evaluation is done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and which is used by query sharding in order to get stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and is what Grafana Mimir uses for query sharding (because it was built before stringlabels was a thing).
Follow-up work:
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
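Per the description, the stable hash is the pre-stringlabels label hashing scheme: xxhash over the name/value byte stream with a separator byte. A self-contained sketch with a stand-in Label type (separator choice and buffer sizing are illustrative):

package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

const sep = '\xff' // a byte that cannot appear in valid UTF-8 label data

// Label is a stand-in for labels.Label.
type Label struct{ Name, Value string }

// stableHash mimics the pre-stringlabels labels.Hash: the result depends only
// on the label names/values, never on the in-memory representation, so it can
// be recomputed identically across versions for query sharding.
func stableHash(ls []Label) uint64 {
    b := make([]byte, 0, 256)
    for _, l := range ls {
        b = append(b, l.Name...)
        b = append(b, sep)
        b = append(b, l.Value...)
        b = append(b, sep)
    }
    return xxhash.Sum64(b)
}

func main() {
    fmt.Println(stableHash([]Label{{"__name__", "up"}, {"job", "api"}}))
}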
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods, because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing the points slice from the previous series when the slice is underutilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
ts , fh := it . AtFloatHistogram ( nil )
2022-12-28 00:55:07 -08:00
samples = append ( samples , sample { t : ts , fh : fh } )
2022-08-29 03:05:03 -07:00
default :
t . Fatalf ( "unknown sample type in query %s" , typ . String ( ) )
}
2017-04-20 06:24:35 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , it . Err ( ) )
2017-04-20 06:24:35 -07:00
2020-02-12 11:22:27 -08:00
if len ( samples ) == 0 {
continue
}
2017-04-20 06:24:35 -07:00
name := series . Labels ( ) . String ( )
result [ name ] = samples
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , ss . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ss . Warnings ( ) )
2017-08-28 15:39:17 -07:00
return result
2017-04-20 06:24:35 -07:00
}
2023-02-21 00:32:59 -08:00
// queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples.
2023-08-24 06:21:17 -07:00
func queryAndExpandChunks ( t testing . TB , q storage . ChunkQuerier , matchers ... * labels . Matcher ) map [ string ] [ ] [ ] chunks . Sample {
2023-02-21 00:32:59 -08:00
s := queryChunks ( t , q , matchers ... )
2023-08-24 06:21:17 -07:00
res := make ( map [ string ] [ ] [ ] chunks . Sample )
2023-02-21 00:32:59 -08:00
for k , v := range s {
2023-08-24 06:21:17 -07:00
var samples [ ] [ ] chunks . Sample
2023-02-21 00:32:59 -08:00
for _ , chk := range v {
sam , err := storage . ExpandSamples ( chk . Chunk . Iterator ( nil ) , nil )
require . NoError ( t , err )
samples = append ( samples , sam )
}
res [ k ] = samples
}
return res
}
// queryChunks runs a matcher query against the querier and expands its data.
2020-07-31 08:03:02 -07:00
func queryChunks ( t testing . TB , q storage . ChunkQuerier , matchers ... * labels . Matcher ) map [ string ] [ ] chunks . Meta {
2023-09-12 03:37:38 -07:00
ss := q . Select ( context . Background ( ) , false , nil , matchers ... )
2020-07-31 08:03:02 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , q . Close ( ) )
2020-07-31 08:03:02 -07:00
} ( )
2022-09-20 10:16:45 -07:00
var it chunks . Iterator
2020-07-31 08:03:02 -07:00
result := map [ string ] [ ] chunks . Meta { }
for ss . Next ( ) {
series := ss . At ( )
chks := [ ] chunks . Meta { }
2022-09-20 10:16:45 -07:00
it = series . Iterator ( it )
2020-07-31 08:03:02 -07:00
for it . Next ( ) {
chks = append ( chks , it . At ( ) )
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , it . Err ( ) )
2020-07-31 08:03:02 -07:00
if len ( chks ) == 0 {
continue
}
name := series . Labels ( ) . String ( )
result [ name ] = chks
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , ss . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ss . Warnings ( ) )
2020-07-31 08:03:02 -07:00
return result
}
2018-05-28 13:00:36 -07:00
// Ensure that blocks are held in memory in their time order
// and not in ULID order as they are read from the directory.
func TestDB_reloadOrder ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2018-05-28 13:00:36 -07:00
2018-12-29 03:20:51 -08:00
metas := [ ] BlockMeta {
{ MinTime : 90 , MaxTime : 100 } ,
{ MinTime : 70 , MaxTime : 80 } ,
{ MinTime : 100 , MaxTime : 110 } ,
2018-05-28 13:00:36 -07:00
}
for _ , m := range metas {
2019-01-28 03:24:49 -08:00
createBlock ( t , db . Dir ( ) , genSeries ( 1 , 1 , m . MinTime , m . MaxTime ) )
2018-05-28 13:00:36 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . reloadBlocks ( ) )
2018-05-28 13:00:36 -07:00
blocks := db . Blocks ( )
2023-12-07 03:35:01 -08:00
require . Len ( t , blocks , 3 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , metas [ 1 ] . MinTime , blocks [ 0 ] . Meta ( ) . MinTime )
require . Equal ( t , metas [ 1 ] . MaxTime , blocks [ 0 ] . Meta ( ) . MaxTime )
require . Equal ( t , metas [ 0 ] . MinTime , blocks [ 1 ] . Meta ( ) . MinTime )
require . Equal ( t , metas [ 0 ] . MaxTime , blocks [ 1 ] . Meta ( ) . MaxTime )
require . Equal ( t , metas [ 2 ] . MinTime , blocks [ 2 ] . Meta ( ) . MinTime )
require . Equal ( t , metas [ 2 ] . MaxTime , blocks [ 2 ] . Meta ( ) . MaxTime )
2018-05-28 13:00:36 -07:00
}
2017-04-18 09:22:13 -07:00
func TestDataAvailableOnlyAfterCommit ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-04-18 09:22:13 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2017-08-28 15:39:17 -07:00
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-11-18 11:53:33 -08:00
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample { } , seriesSet )
2017-04-18 09:22:13 -07:00
err = app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
2023-09-12 03:37:38 -07:00
querier , err = db . Querier ( 0 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
defer querier . Close ( )
2019-11-18 11:53:33 -08:00
seriesSet = query ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2017-08-28 15:39:17 -07:00
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample { `{foo="bar"}` : { sample { t : 0 , f : 0 } } } , seriesSet )
2017-04-18 09:22:13 -07:00
}
2020-11-19 05:00:47 -08:00
// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.
2020-07-21 00:02:13 -07:00
// https://github.com/prometheus/prometheus/issues/7548
2020-11-19 05:00:47 -08:00
func TestNoPanicAfterWALCorruption ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , & Options { WALSegmentSize : 32 * 1024 } , nil )
2020-07-21 00:02:13 -07:00
2020-09-20 10:42:01 -07:00
// Append until the first mmapped head chunk.
2020-07-21 00:02:13 -07:00
// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
2023-08-24 06:21:17 -07:00
var expSamples [ ] chunks . Sample
2020-07-21 00:02:13 -07:00
var maxt int64
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
2020-07-21 00:02:13 -07:00
{
2022-01-10 05:36:45 -08:00
// Appending 121 samples because on the 121st a new chunk will be created.
for i := 0 ; i < 121 ; i ++ {
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , maxt , 0 )
2023-03-30 10:50:13 -07:00
expSamples = append ( expSamples , sample { t : maxt , f : 0 } )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2020-07-21 00:02:13 -07:00
maxt ++
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-07-21 00:02:13 -07:00
}
// Corrupt the WAL after the first sample of the series so that it has at least one sample and
// it is not garbage collected.
// The repair deletes all WAL records after the corrupted record, and those are then read from the mmapped chunk.
{
2022-04-27 02:24:36 -07:00
walFiles , err := os . ReadDir ( path . Join ( db . Dir ( ) , "wal" ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-10-22 01:06:44 -07:00
f , err := os . OpenFile ( path . Join ( db . Dir ( ) , "wal" , walFiles [ 0 ] . Name ( ) ) , os . O_RDWR , 0o666 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2022-10-10 08:08:46 -07:00
r := wlog . NewReader ( bufio . NewReader ( f ) )
2020-10-29 02:43:23 -07:00
require . True ( t , r . Next ( ) , "reading the series record" )
require . True ( t , r . Next ( ) , "reading the first sample record" )
2020-07-21 00:02:13 -07:00
// Write an invalid record header to corrupt everything after the first wal sample.
_ , err = f . WriteAt ( [ ] byte { 99 } , r . Offset ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-21 00:02:13 -07:00
f . Close ( )
}
// Query the data.
{
2021-06-05 07:29:32 -07:00
db , err := Open ( db . Dir ( ) , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-21 00:02:13 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-07-21 00:02:13 -07:00
} ( )
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . walCorruptionsTotal ) , "WAL corruption count mismatch" )
2020-07-21 00:02:13 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , maxt )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-21 00:02:13 -07:00
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "" , "" ) )
// The last sample should be missing as it was after the WAL segment corruption.
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample { `{foo="bar"}` : expSamples [ 0 : len ( expSamples ) - 1 ] } , seriesSet )
2020-07-21 00:02:13 -07:00
}
}
2017-04-18 09:22:13 -07:00
func TestDataNotAvailableAfterRollback ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-04-18 09:22:13 -07:00
2020-07-24 07:10:51 -07:00
app := db . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
err = app . Rollback ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-18 09:22:13 -07:00
defer querier . Close ( )
2019-11-18 11:53:33 -08:00
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2017-08-28 15:39:17 -07:00
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample { } , seriesSet )
2017-04-18 09:22:13 -07:00
}
2017-04-28 06:24:28 -07:00
func TestDBAppenderAddRef ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-04-28 06:24:28 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app1 := db . Appender ( ctx )
2017-04-28 06:24:28 -07:00
2021-02-18 04:07:00 -08:00
ref1 , err := app1 . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 123 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-04-28 06:24:28 -07:00
2017-09-05 02:45:18 -07:00
// Reference should already work before commit.
2022-03-09 14:17:29 -08:00
ref2 , err := app1 . Append ( ref1 , labels . EmptyLabels ( ) , 124 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
require . Equal ( t , ref1 , ref2 )
2017-05-17 07:43:01 -07:00
2017-05-18 07:09:30 -07:00
err = app1 . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-05-17 07:43:01 -07:00
2020-07-24 07:10:51 -07:00
app2 := db . Appender ( ctx )
2017-09-05 02:45:18 -07:00
// First ref should already work in the next transaction.
2022-03-09 14:17:29 -08:00
ref3 , err := app2 . Append ( ref1 , labels . EmptyLabels ( ) , 125 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
require . Equal ( t , ref1 , ref3 )
2017-05-17 07:43:01 -07:00
2021-02-18 04:07:00 -08:00
ref4 , err := app2 . Append ( ref1 , labels . FromStrings ( "a" , "b" ) , 133 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
require . Equal ( t , ref1 , ref4 )
2017-09-05 02:45:18 -07:00
2017-04-28 06:24:28 -07:00
// Reference must be valid to add another sample.
2022-03-09 14:17:29 -08:00
ref5 , err := app2 . Append ( ref2 , labels . EmptyLabels ( ) , 143 , 2 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
require . Equal ( t , ref1 , ref5 )
2017-04-28 06:24:28 -07:00
2021-02-18 04:07:00 -08:00
// Missing labels & invalid refs should fail.
2022-03-09 14:17:29 -08:00
_ , err = app2 . Append ( 9999999 , labels . EmptyLabels ( ) , 1 , 1 )
2023-11-16 10:54:41 -08:00
require . ErrorIs ( t , err , ErrInvalidSample )
2017-08-28 15:39:17 -07:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , app2 . Commit ( ) )
2017-08-28 15:39:17 -07:00
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( 0 , 200 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-10-09 06:21:46 -07:00
2019-11-18 11:53:33 -08:00
res := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample {
2018-05-07 05:39:54 -07:00
labels . FromStrings ( "a" , "b" ) . String ( ) : {
2023-03-30 10:50:13 -07:00
sample { t : 123 , f : 0 } ,
sample { t : 124 , f : 1 } ,
sample { t : 125 , f : 0 } ,
sample { t : 133 , f : 1 } ,
sample { t : 143 , f : 2 } ,
2017-08-28 15:39:17 -07:00
} ,
} , res )
2017-04-28 06:24:28 -07:00
}
2017-05-23 05:43:30 -07:00
2019-05-07 03:00:16 -07:00
func TestAppendEmptyLabelsIgnored ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-05-07 03:00:16 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-05-07 03:00:16 -07:00
} ( )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app1 := db . Appender ( ctx )
2019-05-07 03:00:16 -07:00
2021-02-18 04:07:00 -08:00
ref1 , err := app1 . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 123 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-05-07 03:00:16 -07:00
2022-03-09 14:17:29 -08:00
// Add with empty label.
ref2 , err := app1 . Append ( 0 , labels . FromStrings ( "a" , "b" , "c" , "" ) , 124 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-05-07 03:00:16 -07:00
// Should be the same series.
2020-10-29 02:43:23 -07:00
require . Equal ( t , ref1 , ref2 )
2019-05-07 03:00:16 -07:00
err = app1 . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-05-07 03:00:16 -07:00
}
2017-05-23 05:43:30 -07:00
func TestDeleteSimple ( t * testing . T ) {
2023-09-13 06:43:06 -07:00
const numSamples int64 = 10
2017-05-23 05:43:30 -07:00
cases := [ ] struct {
2019-09-19 02:15:41 -07:00
Intervals tombstones . Intervals
2017-05-23 05:43:30 -07:00
remaint [ ] int64
} {
2019-01-08 09:08:41 -08:00
{
2019-09-19 02:15:41 -07:00
Intervals : tombstones . Intervals { { Mint : 0 , Maxt : 3 } } ,
2019-01-08 09:08:41 -08:00
remaint : [ ] int64 { 4 , 5 , 6 , 7 , 8 , 9 } ,
} ,
{
2019-09-19 02:15:41 -07:00
Intervals : tombstones . Intervals { { Mint : 1 , Maxt : 3 } } ,
2019-01-08 09:08:41 -08:00
remaint : [ ] int64 { 0 , 4 , 5 , 6 , 7 , 8 , 9 } ,
} ,
2017-05-23 05:43:30 -07:00
{
2019-09-19 02:15:41 -07:00
Intervals : tombstones . Intervals { { Mint : 1 , Maxt : 3 } , { Mint : 4 , Maxt : 7 } } ,
2017-05-23 05:43:30 -07:00
remaint : [ ] int64 { 0 , 8 , 9 } ,
} ,
2019-01-08 09:08:41 -08:00
{
2019-09-19 02:15:41 -07:00
Intervals : tombstones . Intervals { { Mint : 1 , Maxt : 3 } , { Mint : 4 , Maxt : 700 } } ,
2019-01-08 09:08:41 -08:00
remaint : [ ] int64 { 0 } ,
} ,
{ // This case is to ensure that labels and symbols are deleted.
2019-09-19 02:15:41 -07:00
Intervals : tombstones . Intervals { { Mint : 0 , Maxt : 9 } } ,
2019-01-08 09:08:41 -08:00
remaint : [ ] int64 { } ,
} ,
2017-05-23 05:43:30 -07:00
}
Outer :
for _ , c := range cases {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-02-08 03:26:28 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-02-08 03:26:28 -08:00
} ( )
2019-01-08 09:08:41 -08:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2019-01-08 09:08:41 -08:00
smpls := make ( [ ] float64 , numSamples )
for i := int64 ( 0 ) ; i < numSamples ; i ++ {
smpls [ i ] = rand . Float64 ( )
2022-03-09 14:17:29 -08:00
app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , i , smpls [ i ] )
2019-01-08 09:08:41 -08:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2019-01-08 09:08:41 -08:00
2017-05-23 05:43:30 -07:00
// TODO(gouthamve): Reset the tombstones somehow.
// Delete the ranges.
2019-09-19 02:15:41 -07:00
for _ , r := range c . Intervals {
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , r . Mint , r . Maxt , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) ) )
2017-05-23 05:43:30 -07:00
}
// Compare the result.
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( 0 , numSamples )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-10-09 06:21:46 -07:00
2023-09-12 03:37:38 -07:00
res := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2017-05-23 05:43:30 -07:00
2023-08-24 06:21:17 -07:00
expSamples := make ( [ ] chunks . Sample , 0 , len ( c . remaint ) )
2017-05-23 05:43:30 -07:00
for _ , ts := range c . remaint {
2021-11-28 23:54:23 -08:00
expSamples = append ( expSamples , sample { ts , smpls [ ts ] , nil , nil } )
2017-05-23 05:43:30 -07:00
}
2020-02-06 07:58:38 -08:00
expss := newMockSeriesSet ( [ ] storage . Series {
2020-07-31 08:03:02 -07:00
storage . NewListSeries ( labels . FromStrings ( "a" , "b" ) , expSamples ) ,
2017-05-23 05:43:30 -07:00
} )
for {
eok , rok := expss . Next ( ) , res . Next ( )
2020-10-29 02:43:23 -07:00
require . Equal ( t , eok , rok )
2017-05-23 05:43:30 -07:00
if ! eok {
2023-12-07 03:35:01 -08:00
require . Empty ( t , res . Warnings ( ) )
2017-05-23 05:43:30 -07:00
continue Outer
}
sexp := expss . At ( )
sres := res . At ( )
2020-10-29 02:43:23 -07:00
require . Equal ( t , sexp . Labels ( ) , sres . Labels ( ) )
2017-05-23 05:43:30 -07:00
2022-09-20 10:16:45 -07:00
smplExp , errExp := storage . ExpandSamples ( sexp . Iterator ( nil ) , nil )
smplRes , errRes := storage . ExpandSamples ( sres . Iterator ( nil ) , nil )
2017-05-23 05:43:30 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , errExp , errRes )
require . Equal ( t , smplExp , smplRes )
2017-05-23 05:43:30 -07:00
}
}
}
2017-08-28 15:39:17 -07:00
2022-12-28 00:55:07 -08:00
func TestAmendHistogramDatapointCausesError ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-08-28 15:39:17 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , 0 )
2022-09-19 00:40:30 -07:00
require . NoError ( t , err )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , 1 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrDuplicateSampleForTimestamp , err )
require . NoError ( t , app . Rollback ( ) )
2022-09-19 00:40:30 -07:00
h := histogram . Histogram {
Schema : 3 ,
2023-10-13 00:58:26 -07:00
Count : 52 ,
2022-09-19 00:40:30 -07:00
Sum : 2.7 ,
ZeroThreshold : 0.1 ,
ZeroCount : 42 ,
PositiveSpans : [ ] histogram . Span {
{ Offset : 0 , Length : 4 } ,
{ Offset : 10 , Length : 3 } ,
} ,
PositiveBuckets : [ ] int64 { 1 , 2 , - 2 , 1 , - 1 , 0 , 0 } ,
}
2023-11-29 06:15:57 -08:00
fh := h . ToFloat ( nil )
2022-09-19 00:40:30 -07:00
app = db . Appender ( ctx )
2022-12-28 00:55:07 -08:00
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "c" ) , 0 , h . Copy ( ) , nil )
2022-09-19 00:40:30 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
app = db . Appender ( ctx )
2022-12-28 00:55:07 -08:00
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "c" ) , 0 , h . Copy ( ) , nil )
2022-09-19 00:40:30 -07:00
require . NoError ( t , err )
h . Schema = 2
2022-12-28 00:55:07 -08:00
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "c" ) , 0 , h . Copy ( ) , nil )
require . Equal ( t , storage . ErrDuplicateSampleForTimestamp , err )
require . NoError ( t , app . Rollback ( ) )
// Float histogram.
app = db . Appender ( ctx )
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "d" ) , 0 , nil , fh . Copy ( ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
app = db . Appender ( ctx )
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "d" ) , 0 , nil , fh . Copy ( ) )
require . NoError ( t , err )
fh . Schema = 2
_ , err = app . AppendHistogram ( 0 , labels . FromStrings ( "a" , "d" ) , 0 , nil , fh . Copy ( ) )
2022-09-19 00:40:30 -07:00
require . Equal ( t , storage . ErrDuplicateSampleForTimestamp , err )
require . NoError ( t , app . Rollback ( ) )
2017-08-28 15:39:17 -07:00
}
func TestDuplicateNaNDatapointNoAmendError ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-08-28 15:39:17 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , math . NaN ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , math . NaN ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-08-28 15:39:17 -07:00
}
func TestNonDuplicateNaNDatapointsCausesAmendError ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2020-02-10 15:15:01 -08:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , math . Float64frombits ( 0x7ff0000000000001 ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , math . Float64frombits ( 0x7ff0000000000002 ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrDuplicateSampleForTimestamp , err )
2017-08-28 15:39:17 -07:00
}
2020-03-01 23:18:05 -08:00
func TestEmptyLabelsetCausesError ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2020-03-01 23:18:05 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-03-01 23:18:05 -08:00
} ( )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . Labels { } , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . Error ( t , err )
require . Equal ( t , "empty labelset: invalid sample" , err . Error ( ) )
2020-03-01 23:18:05 -08:00
}
2017-08-28 15:39:17 -07:00
func TestSkippingInvalidValuesInSameTxn ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2017-08-28 15:39:17 -07:00
// Append AmendedValue.
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 0 , 2 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
// Make sure the right value is stored.
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( 0 , 10 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-10-09 06:21:46 -07:00
2019-11-18 11:53:33 -08:00
ssMap := query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample {
2021-11-28 23:54:23 -08:00
labels . New ( labels . Label { Name : "a" , Value : "b" } ) . String ( ) : { sample { 0 , 1 , nil , nil } } ,
2017-08-28 15:39:17 -07:00
} , ssMap )
// Append Out of Order Value.
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 10 , 3 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2022-03-09 14:17:29 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 7 , 5 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2017-08-28 15:39:17 -07:00
2023-09-12 03:37:38 -07:00
q , err = db . Querier ( 0 , 10 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-10-09 06:21:46 -07:00
2019-11-18 11:53:33 -08:00
ssMap = query ( t , q , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2017-08-28 15:39:17 -07:00
2023-08-24 06:21:17 -07:00
require . Equal ( t , map [ string ] [ ] chunks . Sample {
2021-11-28 23:54:23 -08:00
labels . New ( labels . Label { Name : "a" , Value : "b" } ) . String ( ) : { sample { 0 , 1 , nil , nil } , sample { 10 , 3 , nil , nil } } ,
2017-08-28 15:39:17 -07:00
} , ssMap )
}
2017-10-03 05:06:26 -07:00
func TestDB_Snapshot ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2017-10-03 05:06:26 -07:00
// append data
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2017-10-03 05:06:26 -07:00
mint := int64 ( 1414141414000 )
for i := 0 ; i < 1000 ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , mint + int64 ( i ) , 1.0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2017-10-03 05:06:26 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2017-10-03 05:06:26 -07:00
// create snapshot
2022-01-22 01:55:01 -08:00
snap := t . TempDir ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Snapshot ( snap , true ) )
require . NoError ( t , db . Close ( ) )
2017-10-03 05:06:26 -07:00
// reopen DB from snapshot
2022-01-22 01:55:01 -08:00
db , err := Open ( snap , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , db . Close ( ) ) } ( )
2017-10-03 05:06:26 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( mint , mint + 1000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , querier . Close ( ) ) } ( )
2017-10-03 05:06:26 -07:00
// sum values
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2022-09-20 10:16:45 -07:00
var series chunkenc . Iterator
2017-10-03 05:06:26 -07:00
sum := 0.0
for seriesSet . Next ( ) {
2022-09-20 10:16:45 -07:00
series = seriesSet . At ( ) . Iterator ( series )
2021-11-28 23:54:23 -08:00
for series . Next ( ) == chunkenc . ValFloat {
2017-10-03 05:06:26 -07:00
_ , v := series . At ( )
sum += v
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , series . Err ( ) )
2017-10-03 05:06:26 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , seriesSet . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , seriesSet . Warnings ( ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1000.0 , sum )
2017-10-03 05:06:26 -07:00
}
2019-07-03 03:47:31 -07:00
// TestDB_Snapshot_ChunksOutsideOfCompactedRange ensures that a snapshot removes chunk samples
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
func TestDB_Snapshot_ChunksOutsideOfCompactedRange ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-07-03 03:47:31 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2019-07-03 03:47:31 -07:00
mint := int64 ( 1414141414000 )
for i := 0 ; i < 1000 ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , mint + int64 ( i ) , 1.0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-07-03 03:47:31 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2019-07-03 03:47:31 -07:00
2022-01-22 01:55:01 -08:00
snap := t . TempDir ( )
2019-07-03 03:47:31 -07:00
// Hackily introduce a "race" by having a lower max time than the maxTime of the last chunk.
2020-07-27 21:42:42 -07:00
db . head . maxTime . Sub ( 10 )
2019-07-03 03:47:31 -07:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Snapshot ( snap , true ) )
require . NoError ( t , db . Close ( ) )
2019-07-03 03:47:31 -07:00
// Reopen DB from snapshot.
2022-01-22 01:55:01 -08:00
db , err := Open ( snap , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , db . Close ( ) ) } ( )
2019-07-03 03:47:31 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( mint , mint + 1000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , querier . Close ( ) ) } ( )
2019-07-03 03:47:31 -07:00
// Sum values.
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2022-09-20 10:16:45 -07:00
var series chunkenc . Iterator
2019-07-03 03:47:31 -07:00
sum := 0.0
for seriesSet . Next ( ) {
2022-09-20 10:16:45 -07:00
series = seriesSet . At ( ) . Iterator ( series )
2021-11-28 23:54:23 -08:00
for series . Next ( ) == chunkenc . ValFloat {
2019-07-03 03:47:31 -07:00
_ , v := series . At ( )
sum += v
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , series . Err ( ) )
2019-07-03 03:47:31 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , seriesSet . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , seriesSet . Warnings ( ) )
2019-07-03 03:47:31 -07:00
// Since we snapshotted with MaxTime - 10, so expect 10 less samples.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1000.0 - 10 , sum )
2019-07-03 03:47:31 -07:00
}
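
// TestDB_SnapshotWithDelete ensures that samples deleted before taking a
// snapshot are not returned when the snapshot is reopened and queried.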
func TestDB_SnapshotWithDelete(t *testing.T) {
	const numSamples int64 = 10

	db := openTestDB(t, nil, nil)
	defer func() { require.NoError(t, db.Close()) }()

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
	}

	require.NoError(t, app.Commit())
	cases := []struct {
		intervals tombstones.Intervals
		remaint   []int64
	}{
		{
			intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			remaint:   []int64{0, 8, 9},
		},
	}

Outer:
	for _, c := range cases {
		// TODO(gouthamve): Reset the tombstones somehow.
		// Delete the ranges.
		for _, r := range c.intervals {
			require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
		}

		// Create snapshot.
		snap := t.TempDir()

		require.NoError(t, db.Snapshot(snap, true))

		// Reopen DB from snapshot.
		newDB, err := Open(snap, nil, nil, nil, nil)
		require.NoError(t, err)
		defer func() { require.NoError(t, newDB.Close()) }()

		// Compare the result.
		q, err := newDB.Querier(0, numSamples)
		require.NoError(t, err)
		defer func() { require.NoError(t, q.Close()) }()

		res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]chunks.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
		}

		expss := newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
		})

		if len(expSamples) == 0 {
			require.False(t, res.Next())
			continue
		}

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				require.Empty(t, res.Warnings())
				continue Outer
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
	}
}
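
// TestDB_e2e appends randomized samples to eight series and checks that
// random time-range queries return exactly the expected samples.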
func TestDB_e2e(t *testing.T) {
	const (
		numDatapoints = 1000
		numRanges     = 1000
		timeInterval  = int64(3)
	)

	// Create 8 series with 1000 data-points of different ranges and run queries.
	lbls := [][]labels.Label{
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "b"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prometheus"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "127.0.0.1:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
		{
			{Name: "a", Value: "c"},
			{Name: "instance", Value: "localhost:9090"},
			{Name: "job", Value: "prom-k8s"},
		},
	}

	seriesMap := map[string][]chunks.Sample{}
	for _, l := range lbls {
		seriesMap[labels.New(l...).String()] = []chunks.Sample{}
	}

	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	ctx := context.Background()
	app := db.Appender(ctx)

	for _, l := range lbls {
		lset := labels.New(l...)
		series := []chunks.Sample{}

		ts := rand.Int63n(300)
		for i := 0; i < numDatapoints; i++ {
			v := rand.Float64()

			series = append(series, sample{ts, v, nil, nil})

			_, err := app.Append(0, lset, ts, v)
			require.NoError(t, err)

			ts += rand.Int63n(timeInterval) + 1
		}

		seriesMap[lset.String()] = series
	}

	require.NoError(t, app.Commit())

	// Query each selector on 1000 random time-ranges.
	queries := []struct {
		ms []*labels.Matcher
	}{
		{
			ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
			},
		},
		{
			ms: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
				labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
				labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
			},
		},
		// TODO: Add Regexp Matchers.
	}

	for _, qry := range queries {
		matched := labels.Slice{}
		for _, l := range lbls {
			s := labels.Selector(qry.ms)
			ls := labels.New(l...)
			if s.Matches(ls) {
				matched = append(matched, ls)
			}
		}

		sort.Sort(matched)

		for i := 0; i < numRanges; i++ {
			mint := rand.Int63n(300)
			maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))

			expected := map[string][]chunks.Sample{}

			// Build the mockSeriesSet.
			for _, m := range matched {
				smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
				if len(smpls) > 0 {
					expected[m.String()] = smpls
				}
			}

			q, err := db.Querier(mint, maxt)
			require.NoError(t, err)

			ss := q.Select(ctx, false, nil, qry.ms...)
			result := map[string][]chunks.Sample{}

			for ss.Next() {
				x := ss.At()

				smpls, err := storage.ExpandSamples(x.Iterator(nil), newSample)
				require.NoError(t, err)

				if len(smpls) > 0 {
					result[x.Labels().String()] = smpls
				}
			}

			require.NoError(t, ss.Err())
			require.Empty(t, ss.Warnings())
			require.Equal(t, expected, result)

			q.Close()
		}
	}
}
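
// TestWALFlushedOnDBClose verifies that appended data reaches the WAL on
// Close and is readable again after reopening the DB.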
func TestWALFlushedOnDBClose(t *testing.T) {
	db := openTestDB(t, nil, nil)

	dirDb := db.Dir()

	lbls := labels.FromStrings("labelname", "labelvalue")

	ctx := context.Background()
	app := db.Appender(ctx)
	_, err := app.Append(0, lbls, 0, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	require.NoError(t, db.Close())

	db, err = Open(dirDb, nil, nil, nil, nil)
	require.NoError(t, err)
	defer func() { require.NoError(t, db.Close()) }()

	q, err := db.Querier(0, 1)
	require.NoError(t, err)

	values, ws, err := q.LabelValues(ctx, "labelname")
	require.NoError(t, err)
	require.Empty(t, ws)
	require.Equal(t, []string{"labelvalue"}, values)
}
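
// TestWALSegmentSizeOptions exercises the WALSegmentSize option: the default
// segment size, a custom size, and a negative value that disables the WAL.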
func TestWALSegmentSizeOptions(t *testing.T) {
	tests := map[int]func(dbdir string, segmentSize int){
		// Default WAL size.
		0: func(dbDir string, segmentSize int) {
			filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
			require.NoError(t, err)
			files := []os.FileInfo{}
			for _, f := range filesAndDir {
				if !f.IsDir() {
					fi, err := f.Info()
					require.NoError(t, err)
					files = append(files, fi)
				}
			}
			// All the full segment files (all but the last) should match the segment size option.
			for _, f := range files[:len(files)-1] {
				require.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
			}
			lastFile := files[len(files)-1]
			require.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
		},
		// Custom WAL size.
		2 * 32 * 1024: func(dbDir string, segmentSize int) {
			filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
			require.NoError(t, err)
			files := []os.FileInfo{}
			for _, f := range filesAndDir {
				if !f.IsDir() {
					fi, err := f.Info()
					require.NoError(t, err)
					files = append(files, fi)
				}
			}
			require.NotEmpty(t, files, "current WALSegmentSize should result in more than a single WAL file.")
			// All the full segment files (all but the last) should match the segment size option.
			for _, f := range files[:len(files)-1] {
				require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
			}
			lastFile := files[len(files)-1]
			require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
		},
		// WAL disabled.
		-1: func(dbDir string, segmentSize int) {
			// Check that the WAL dir is not there.
			_, err := os.Stat(filepath.Join(dbDir, "wal"))
			require.Error(t, err)
			// Check that there is a chunks dir.
			_, err = os.Stat(mmappedChunksDir(dbDir))
			require.NoError(t, err)
		},
	}
	for segmentSize, testFunc := range tests {
		t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
			opts := DefaultOptions()
			opts.WALSegmentSize = segmentSize
			db := openTestDB(t, opts, nil)

			for i := int64(0); i < 155; i++ {
				app := db.Appender(context.Background())
				ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64())
				require.NoError(t, err)
				for j := int64(1); j <= 78; j++ {
					_, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64())
					require.NoError(t, err)
				}
				require.NoError(t, app.Commit())
			}

			dbDir := db.Dir()
			require.NoError(t, db.Close())
			testFunc(dbDir, opts.WALSegmentSize)
		})
	}
}
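
// TestWALReplayRaceOnSamplesLoggedBeforeSeries replays a WAL in which samples
// for a series were logged before the series record itself and verifies that
// replay still produces the expected chunks.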
// https://github.com/prometheus/prometheus/issues/9846
// https://github.com/prometheus/prometheus/issues/9859
func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) {
	const (
		numRuns                        = 1
		numSamplesBeforeSeriesCreation = 1000
	)

	// We test both with few and many samples appended after series creation. If samples are < 120 then there's no
	// mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
	for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
		for run := 1; run <= numRuns; run++ {
			t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
				testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
			})
		}
	}
}

func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
	const numSeries = 1000

	db := openTestDB(t, nil, nil)
	db.DisableCompactions()

	for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
		// Log samples before the series is logged to the WAL.
		var enc record.Encoder
		var samples []record.RefSample

		for ts := 0; ts < numSamplesBeforeSeriesCreation; ts++ {
			samples = append(samples, record.RefSample{
				Ref: chunks.HeadSeriesRef(uint64(seriesRef)),
				T:   int64(ts),
				V:   float64(ts),
			})
		}

		err := db.Head().wal.Log(enc.Samples(samples, nil))
		require.NoError(t, err)

		// Add samples via appender so that they're logged after the series in the WAL.
		app := db.Appender(context.Background())
		lbls := labels.FromStrings("series_id", strconv.Itoa(seriesRef))

		for ts := numSamplesBeforeSeriesCreation; ts < numSamplesBeforeSeriesCreation+numSamplesAfterSeriesCreation; ts++ {
			_, err := app.Append(0, lbls, int64(ts), float64(ts))
			require.NoError(t, err)
		}

		require.NoError(t, app.Commit())
	}

	require.NoError(t, db.Close())

	// Reopen the DB, replaying the WAL.
	reopenDB, err := Open(db.Dir(), log.NewLogfmtLogger(os.Stderr), nil, nil, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, reopenDB.Close())
	})

	// Query back chunks for all series.
	q, err := reopenDB.ChunkQuerier(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)

	set := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+"))
	actualSeries := 0
	var chunksIt chunks.Iterator

	for set.Next() {
		actualSeries++
		actualChunks := 0

		chunksIt = set.At().Iterator(chunksIt)
		for chunksIt.Next() {
			actualChunks++
		}
		require.NoError(t, chunksIt.Err())

		// We expect 1 chunk every 120 samples after series creation.
		require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String())
	}

	require.NoError(t, set.Err())
	require.Equal(t, numSeries, actualSeries)
}
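
// TestTombstoneClean checks that CleanTombstones rewrites blocks without the
// deleted data and leaves no tombstones behind on any block.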
func TestTombstoneClean(t *testing.T) {
	const numSamples int64 = 10

	db := openTestDB(t, nil, nil)

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
	}

	require.NoError(t, app.Commit())
	cases := []struct {
		intervals tombstones.Intervals
		remaint   []int64
	}{
		{
			intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
			remaint:   []int64{0, 8, 9},
		},
	}

	for _, c := range cases {
		// Delete the ranges.

		// Create snapshot.
		snap := t.TempDir()
		require.NoError(t, db.Snapshot(snap, true))
		require.NoError(t, db.Close())

		// Reopen DB from snapshot.
		db, err := Open(snap, nil, nil, nil, nil)
		require.NoError(t, err)
		defer db.Close()

		for _, r := range c.intervals {
			require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
		}

		// All of the setup for THIS line.
		require.NoError(t, db.CleanTombstones())

		// Compare the result.
		q, err := db.Querier(0, numSamples)
		require.NoError(t, err)
		defer q.Close()

		res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]chunks.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
			expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
		}

		expss := newMockSeriesSet([]storage.Series{
			storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
		})

		if len(expSamples) == 0 {
			require.False(t, res.Next())
			continue
		}

		for {
			eok, rok := expss.Next(), res.Next()
			require.Equal(t, eok, rok)

			if !eok {
				break
			}
			sexp := expss.At()
			sres := res.At()

			require.Equal(t, sexp.Labels(), sres.Labels())

			smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
			smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)

			require.Equal(t, errExp, errRes)
			require.Equal(t, smplExp, smplRes)
		}
		require.Empty(t, res.Warnings())

		for _, b := range db.Blocks() {
			require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
		}
	}
}

// TestTombstoneCleanResultEmptyBlock tests that a CleanTombstones run that results in
// empty blocks (no timeseries) will also delete the resultant block.
func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
	numSamples := int64(10)

	db := openTestDB(t, nil, nil)

	ctx := context.Background()
	app := db.Appender(ctx)

	smpls := make([]float64, numSamples)
	for i := int64(0); i < numSamples; i++ {
		smpls[i] = rand.Float64()
		app.Append(0, labels.FromStrings("a", "b"), i, smpls[i])
	}

	require.NoError(t, app.Commit())
	// Interval should cover the whole block.
	intervals := tombstones.Intervals{{Mint: 0, Maxt: numSamples}}

	// Create snapshot.
	snap := t.TempDir()
	require.NoError(t, db.Snapshot(snap, true))
	require.NoError(t, db.Close())

	// Reopen DB from snapshot.
	db, err := Open(snap, nil, nil, nil, nil)
	require.NoError(t, err)
	defer db.Close()

	// Create tombstones by deleting all samples.
	for _, r := range intervals {
		require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
	}

	require.NoError(t, db.CleanTombstones())

	// After cleaning tombstones that cover the entire block, no blocks should be left behind.
	actualBlockDirs, err := blockDirs(db.dir)
	require.NoError(t, err)
	require.Empty(t, actualBlockDirs)
}

// TestTombstoneCleanFail tests that a failing CleanTombstones doesn't leave any blocks behind.
// When CleanTombstones errors, the original block that should be rebuilt doesn't get deleted, so
// if CleanTombstones left any blocks behind, these would overlap.
func TestTombstoneCleanFail(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	var oldBlockDirs []string

	// Create some blocks pending for compaction.
	// totalBlocks should be >=2 so we have enough blocks to trigger compaction failure.
	totalBlocks := 2
	for i := 0; i < totalBlocks; i++ {
		blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1))
		block, err := OpenBlock(nil, blockDir, nil)
		require.NoError(t, err)
		// Add some fake tombstones to trigger the compaction.
		tomb := tombstones.NewMemTombstones()
		tomb.AddInterval(0, tombstones.Interval{Mint: int64(i), Maxt: int64(i) + 1})
		block.tombstones = tomb

		db.blocks = append(db.blocks, block)
		oldBlockDirs = append(oldBlockDirs, blockDir)
	}

	// Initialize the mockCompactorFailing with room for a single compaction iteration.
	// mockCompactorFailing will fail on the second iteration, so we can check whether the cleanup works as expected.
	db.compactor = &mockCompactorFailing{
		t:      t,
		blocks: db.blocks,
		max:    totalBlocks + 1,
	}

	// The compactor should trigger a failure here.
	require.Error(t, db.CleanTombstones())

	// Now check that CleanTombstones replaced the old block even after a failure.
	actualBlockDirs, err := blockDirs(db.dir)
	require.NoError(t, err)
	// Only one block should have been replaced by a new block.
	require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
	require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
}

// TestTombstoneCleanRetentionLimitsRace tests that a CleanTombstones operation
// and retention limit policies, when triggered at the same time,
// won't race against each other.
func TestTombstoneCleanRetentionLimitsRace(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	opts := DefaultOptions()
	var wg sync.WaitGroup

	// We want to make sure that a race doesn't happen when a normal reload and a CleanTombstones()
	// reload try to delete the same block. Without the correct lock placement, it can happen if a
	// block is marked for deletion due to retention limits and also has tombstones to be cleaned at
	// the same time.
	//
	// That is something tricky to trigger, so let's try several times just to make sure.
	for i := 0; i < 20; i++ {
		t.Run(fmt.Sprintf("iteration%d", i), func(t *testing.T) {
			db := openTestDB(t, opts, nil)
			totalBlocks := 20
			dbDir := db.Dir()
			// Generate some blocks with old mint (near epoch).
			for j := 0; j < totalBlocks; j++ {
				blockDir := createBlock(t, dbDir, genSeries(10, 1, int64(j), int64(j)+1))
				block, err := OpenBlock(nil, blockDir, nil)
				require.NoError(t, err)
				// Cover the block with tombstones so it can be deleted with CleanTombstones() as well.
				tomb := tombstones.NewMemTombstones()
				tomb.AddInterval(0, tombstones.Interval{Mint: int64(j), Maxt: int64(j) + 1})
				block.tombstones = tomb

				db.blocks = append(db.blocks, block)
			}

			wg.Add(2)
			// Run reloadBlocks and CleanTombstones together, each delayed by a small random time window.
			go func() {
				defer wg.Done()
				time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond)))
				require.NoError(t, db.reloadBlocks())
			}()
			go func() {
				defer wg.Done()
				time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond)))
				require.NoError(t, db.CleanTombstones())
			}()
			wg.Wait()

			require.NoError(t, db.Close())
		})
	}
}
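
// intersection returns the elements present in both oldBlocks and actualBlocks.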
func intersection(oldBlocks, actualBlocks []string) (intersection []string) {
	hash := make(map[string]bool)
	for _, e := range oldBlocks {
		hash[e] = true
	}
	for _, e := range actualBlocks {
		// If the block is present in the hashmap, append it to the intersection list.
		if hash[e] {
			intersection = append(intersection, e)
		}
	}
	return intersection
}

// mockCompactorFailing creates a new empty block on every write and fails when the max allowed total is reached.
// For CompactOOO, it always fails.
type mockCompactorFailing struct {
	t      *testing.T
	blocks []*Block
	max    int
}

func (*mockCompactorFailing) Plan(string) ([]string, error) {
	return nil, nil
}

func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) (ulid.ULID, error) {
	if len(c.blocks) >= c.max {
		return ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail")
	}

	block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
	require.NoError(c.t, err)
	require.NoError(c.t, block.Close()) // Close the block as we won't be using it anywhere.
	c.blocks = append(c.blocks, block)

	// Now check that all expected blocks are actually persisted on disk.
	// This way we make sure that we have some blocks that are supposed to be removed.
	var expectedBlocks []string
	for _, b := range c.blocks {
		expectedBlocks = append(expectedBlocks, filepath.Join(dest, b.Meta().ULID.String()))
	}
	actualBlockDirs, err := blockDirs(dest)
	require.NoError(c.t, err)

	require.Equal(c.t, expectedBlocks, actualBlockDirs)

	return block.Meta().ULID, nil
}

func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, error) {
	return ulid.ULID{}, nil
}

func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
	return nil, fmt.Errorf("mock compaction failing CompactOOO")
}
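
// TestTimeRetention checks that blocks falling entirely outside the configured
// RetentionDuration (measured from the newest block) are dropped on reload.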
func TestTimeRetention(t *testing.T) {
	db := openTestDB(t, nil, []int64{1000})
	defer func() {
		require.NoError(t, db.Close())
	}()

	blocks := []*BlockMeta{
		{MinTime: 500, MaxTime: 900}, // Oldest block.
		{MinTime: 1000, MaxTime: 1500},
		{MinTime: 1500, MaxTime: 2000}, // Newest block.
	}

	for _, m := range blocks {
		createBlock(t, db.Dir(), genSeries(10, 10, m.MinTime, m.MaxTime))
	}

	require.NoError(t, db.reloadBlocks())           // Reload the db to register the new blocks.
	require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.

	db.opts.RetentionDuration = blocks[2].MaxTime - blocks[1].MinTime
	require.NoError(t, db.reloadBlocks())

	expBlocks := blocks[1:]
	actBlocks := db.Blocks()

	require.Equal(t, 1, int(prom_testutil.ToFloat64(db.metrics.timeRetentionCount)), "metric retention count mismatch")
	require.Equal(t, len(expBlocks), len(actBlocks))
	require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime)
	require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime)
}

func TestRetentionDurationMetric(t *testing.T) {
	db := openTestDB(t, &Options{
		RetentionDuration: 1000,
	}, []int64{100})
	defer func() {
		require.NoError(t, db.Close())
	}()

	expRetentionDuration := 1.0
	actRetentionDuration := prom_testutil.ToFloat64(db.metrics.retentionDuration)
	require.Equal(t, expRetentionDuration, actRetentionDuration, "metric retention duration mismatch")
}
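
// TestSizeRetention checks that size-based retention deletes the oldest blocks
// once the registered on-disk size (blocks + WAL + WBL + head chunks) exceeds
// the MaxBytes limit.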
func TestSizeRetention(t *testing.T) {
	opts := DefaultOptions()
	opts.OutOfOrderTimeWindow = 100
	db := openTestDB(t, opts, []int64{100})
	defer func() {
		require.NoError(t, db.Close())
	}()

	blocks := []*BlockMeta{
		{MinTime: 100, MaxTime: 200}, // Oldest block.
		{MinTime: 200, MaxTime: 300},
		{MinTime: 300, MaxTime: 400},
		{MinTime: 400, MaxTime: 500},
		{MinTime: 500, MaxTime: 600}, // Newest block.
	}

	for _, m := range blocks {
		createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
	}

	headBlocks := []*BlockMeta{
		{MinTime: 700, MaxTime: 800},
	}

	// Add some data to the WAL.
	headApp := db.Head().Appender(context.Background())
	var aSeries labels.Labels
	var it chunkenc.Iterator
	for _, m := range headBlocks {
		series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
		for _, s := range series {
			aSeries = s.Labels()
			it = s.Iterator(it)
			for it.Next() == chunkenc.ValFloat {
				tim, v := it.At()
				_, err := headApp.Append(0, s.Labels(), tim, v)
				require.NoError(t, err)
			}
			require.NoError(t, it.Err())
		}
	}
	require.NoError(t, headApp.Commit())

	// mmap the head chunks; mmapping happens on a schedule rather than inside
	// append, so trigger it manually here before checking sizes.
	db.Head().mmapHeadChunks()

	require.Eventually(t, func() bool {
		return db.Head().chunkDiskMapper.IsQueueEmpty()
	}, 2*time.Second, 100*time.Millisecond)

	// Test that the registered size matches the actual disk size.
	require.NoError(t, db.reloadBlocks())                               // Reload the db to register the new db size.
	require.Equal(t, len(blocks), len(db.Blocks()))                     // Ensure all blocks are registered.
	blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
	walSize, err := db.Head().wal.Size()
	require.NoError(t, err)
	cdmSize, err := db.Head().chunkDiskMapper.Size()
	require.NoError(t, err)
	require.NotZero(t, cdmSize)
	// Expected size should take into account block size + WAL size + head chunks size.
	expSize := blockSize + walSize + cdmSize
	actSize, err := fileutil.DirSize(db.Dir())
	require.NoError(t, err)
	require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")

	// Create a WAL checkpoint, and compare sizes.
	first, last, err := wlog.Segments(db.Head().wal.Dir())
	require.NoError(t, err)
	_, err = wlog.Checkpoint(log.NewNopLogger(), db.Head().wal, first, last-1, func(x chunks.HeadSeriesRef) bool { return false }, 0)
	require.NoError(t, err)
	blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
	walSize, err = db.Head().wal.Size()
	require.NoError(t, err)
	cdmSize, err = db.Head().chunkDiskMapper.Size()
	require.NoError(t, err)
	require.NotZero(t, cdmSize)
	expSize = blockSize + walSize + cdmSize
	actSize, err = fileutil.DirSize(db.Dir())
	require.NoError(t, err)
	require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")

	// Truncate the chunk disk mapper and compare sizes.
	require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
	cdmSize, err = db.Head().chunkDiskMapper.Size()
	require.NoError(t, err)
	require.NotZero(t, cdmSize)
	expSize = blockSize + walSize + cdmSize
	actSize, err = fileutil.DirSize(db.Dir())
	require.NoError(t, err)
	require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")

	// Add some out-of-order samples to check the size of the WBL.
	headApp = db.Head().Appender(context.Background())
	for ts := int64(750); ts < 800; ts++ {
		_, err := headApp.Append(0, aSeries, ts, float64(ts))
		require.NoError(t, err)
	}
	require.NoError(t, headApp.Commit())

	walSize, err = db.Head().wal.Size()
	require.NoError(t, err)
	wblSize, err := db.Head().wbl.Size()
	require.NoError(t, err)
	require.NotZero(t, wblSize)
	cdmSize, err = db.Head().chunkDiskMapper.Size()
	require.NoError(t, err)
	expSize = blockSize + walSize + wblSize + cdmSize
	actSize, err = fileutil.DirSize(db.Dir())
	require.NoError(t, err)
	require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")

	// Decrease the max bytes limit so that a delete is triggered.
	// Check the total size, the total count, and that the oldest block was deleted.
	firstBlockSize := db.Blocks()[0].Size()
	sizeLimit := actSize - firstBlockSize
	db.opts.MaxBytes = sizeLimit          // Set the new db size limit one block smaller than the actual size.
	require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.

	expBlocks := blocks[1:]
	actBlocks := db.Blocks()
	blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
	walSize, err = db.Head().wal.Size()
	require.NoError(t, err)
	cdmSize, err = db.Head().chunkDiskMapper.Size()
	require.NoError(t, err)
	require.NotZero(t, cdmSize)
	// Expected size should take into account block size + WAL size + WBL size.
	expSize = blockSize + walSize + wblSize + cdmSize
	actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
	actSize, err = fileutil.DirSize(db.Dir())
	require.NoError(t, err)

	require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
	require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
	require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
	require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
	require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
	require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
}

func TestSizeRetentionMetric(t *testing.T) {
	cases := []struct {
		maxBytes    int64
		expMaxBytes int64
	}{
		{maxBytes: 1000, expMaxBytes: 1000},
		{maxBytes: 0, expMaxBytes: 0},
		{maxBytes: -1000, expMaxBytes: 0},
	}

	for _, c := range cases {
		db := openTestDB(t, &Options{
			MaxBytes: c.maxBytes,
		}, []int64{100})
		defer func() {
			require.NoError(t, db.Close())
		}()

		actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes))
		require.Equal(t, c.expMaxBytes, actMaxBytes, "metric retention limit bytes mismatch")
	}
}
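
// TestNotMatcherSelectsLabelsUnsetSeries verifies that negative matchers also
// select series on which the matched label is not set at all.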
func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	labelpairs := []labels.Labels{
		labels.FromStrings("a", "abcd", "b", "abcde"),
		labels.FromStrings("labelname", "labelvalue"),
	}

	ctx := context.Background()
	app := db.Appender(ctx)
	for _, lbls := range labelpairs {
		_, err := app.Append(0, lbls, 0, 1)
		require.NoError(t, err)
	}
	require.NoError(t, app.Commit())

	cases := []struct {
		selector labels.Selector
		series   []labels.Labels
	}{{
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchNotEqual, "lname", "lvalue"),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchEqual, "a", "abcd"),
			labels.MustNewMatcher(labels.MatchNotEqual, "b", "abcde"),
		},
		series: []labels.Labels{},
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchEqual, "a", "abcd"),
			labels.MustNewMatcher(labels.MatchNotEqual, "b", "abc"),
		},
		series: []labels.Labels{labelpairs[0]},
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchNotRegexp, "a", "abd.*"),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchNotRegexp, "a", "abc.*"),
		},
		series: labelpairs[1:],
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchNotRegexp, "c", "abd.*"),
		},
		series: labelpairs,
	}, {
		selector: labels.Selector{
			labels.MustNewMatcher(labels.MatchNotRegexp, "labelname", "labelvalue"),
		},
		series: labelpairs[:1],
	}}

	q, err := db.Querier(0, 10)
	require.NoError(t, err)
	defer func() { require.NoError(t, q.Close()) }()

	for _, c := range cases {
		ss := q.Select(ctx, false, nil, c.selector...)
		lres, _, ws, err := expandSeriesSet(ss)
		require.NoError(t, err)
		require.Empty(t, ws)
		require.Equal(t, c.series, lres)
	}
}

// expandSeriesSet returns the raw labels in the order they are retrieved from
// the series set and the samples keyed by Labels().String().
func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample, annotations.Annotations, error) {
	resultLabels := []labels.Labels{}
	resultSamples := map[string][]sample{}
	var it chunkenc.Iterator
	for ss.Next() {
		series := ss.At()
		samples := []sample{}
		it = series.Iterator(it)
		for it.Next() == chunkenc.ValFloat {
			t, v := it.At()
			samples = append(samples, sample{t: t, f: v})
		}
		resultLabels = append(resultLabels, series.Labels())
		resultSamples[series.Labels().String()] = samples
	}
	return resultLabels, resultSamples, ss.Warnings(), ss.Err()
}

func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
	// Create 10 blocks that do not overlap (0-10, 10-20, ..., 100-110) but in reverse order to ensure our algorithm
	// will handle that.
	metas := make([]BlockMeta, 11)
	for i := 10; i >= 0; i-- {
		metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
	}

	require.Empty(t, OverlappingBlocks(metas), "we found unexpected overlaps")

	// Add overlapping blocks. We have to establish order again since we aren't interested
	// in trivial overlaps caused by unorderedness.
	add := func(ms ...BlockMeta) []BlockMeta {
		repl := append(append([]BlockMeta{}, metas...), ms...)
		sort.Slice(repl, func(i, j int) bool {
			return repl[i].MinTime < repl[j].MinTime
		})
		return repl
	}

	// o1 overlaps with 10-20.
	o1 := BlockMeta{MinTime: 15, MaxTime: 17}
	require.Equal(t, Overlaps{
		{Min: 15, Max: 17}: {metas[1], o1},
	}, OverlappingBlocks(add(o1)))

	// o2 overlaps with 20-30 and 30-40.
	o2 := BlockMeta{MinTime: 21, MaxTime: 31}
	require.Equal(t, Overlaps{
		{Min: 21, Max: 30}: {metas[2], o2},
		{Min: 30, Max: 31}: {o2, metas[3]},
	}, OverlappingBlocks(add(o2)))

	// o3a and o3b overlap with 30-40 and with each other.
	o3a := BlockMeta{MinTime: 33, MaxTime: 39}
	o3b := BlockMeta{MinTime: 34, MaxTime: 36}
	require.Equal(t, Overlaps{
		{Min: 34, Max: 36}: {metas[3], o3a, o3b},
	}, OverlappingBlocks(add(o3a, o3b)))

	// o4 is a 1:1 overlap with 50-60.
	o4 := BlockMeta{MinTime: 50, MaxTime: 60}
	require.Equal(t, Overlaps{
		{Min: 50, Max: 60}: {metas[5], o4},
	}, OverlappingBlocks(add(o4)))

	// o5 overlaps with 60-70, 70-80 and 80-90.
	o5 := BlockMeta{MinTime: 61, MaxTime: 85}
	require.Equal(t, Overlaps{
		{Min: 61, Max: 70}: {metas[6], o5},
		{Min: 70, Max: 80}: {o5, metas[7]},
		{Min: 80, Max: 85}: {o5, metas[8]},
	}, OverlappingBlocks(add(o5)))

	// o6a overlaps with 90-100, 100-110 and o6b; o6b overlaps with 90-100 and o6a.
	o6a := BlockMeta{MinTime: 92, MaxTime: 105}
	o6b := BlockMeta{MinTime: 94, MaxTime: 99}
	require.Equal(t, Overlaps{
		{Min: 94, Max: 99}:   {metas[9], o6a, o6b},
		{Min: 100, Max: 105}: {o6a, metas[10]},
	}, OverlappingBlocks(add(o6a, o6b)))

	// All together.
	require.Equal(t, Overlaps{
		{Min: 15, Max: 17}: {metas[1], o1},
		{Min: 21, Max: 30}: {metas[2], o2}, {Min: 30, Max: 31}: {o2, metas[3]},
		{Min: 34, Max: 36}: {metas[3], o3a, o3b},
		{Min: 50, Max: 60}: {metas[5], o4},
		{Min: 61, Max: 70}: {metas[6], o5}, {Min: 70, Max: 80}: {o5, metas[7]}, {Min: 80, Max: 85}: {o5, metas[8]},
		{Min: 94, Max: 99}: {metas[9], o6a, o6b}, {Min: 100, Max: 105}: {o6a, metas[10]},
	}, OverlappingBlocks(add(o1, o2, o3a, o3b, o4, o5, o6a, o6b)))

	// Additional case.
	var nc1 []BlockMeta
	nc1 = append(nc1, BlockMeta{MinTime: 1, MaxTime: 5})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 3})
	nc1 = append(nc1, BlockMeta{MinTime: 2, MaxTime: 6})
	nc1 = append(nc1, BlockMeta{MinTime: 3, MaxTime: 5})
	nc1 = append(nc1, BlockMeta{MinTime: 5, MaxTime: 7})
	nc1 = append(nc1, BlockMeta{MinTime: 7, MaxTime: 10})
	nc1 = append(nc1, BlockMeta{MinTime: 8, MaxTime: 9})
	require.Equal(t, Overlaps{
		{Min: 2, Max: 3}: {nc1[0], nc1[1], nc1[2], nc1[3], nc1[4], nc1[5]}, // 1-5, 2-3, 2-3, 2-3, 2-3, 2-6
		{Min: 3, Max: 5}: {nc1[0], nc1[5], nc1[6]},                        // 1-5, 2-6, 3-5
		{Min: 5, Max: 6}: {nc1[5], nc1[7]},                                // 2-6, 5-7
		{Min: 8, Max: 9}: {nc1[8], nc1[9]},                                // 7-10, 8-9
	}, OverlappingBlocks(nc1))
}
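
// TestChunkAtBlockBoundary exercises compaction of samples appended exactly at
// block boundaries and then inspects the chunks in each produced block.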
2018-06-13 02:24:28 -07:00
2020-03-25 12:26:10 -07:00
// Regression test for https://github.com/prometheus/tsdb/issues/347
2018-06-13 02:24:28 -07:00
func TestChunkAtBlockBoundary ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2018-06-13 02:24:28 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2018-06-13 02:24:28 -07:00
2020-02-06 07:58:38 -08:00
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
2018-06-13 02:24:28 -07:00
label := labels . FromStrings ( "foo" , "bar" )
for i := int64 ( 0 ) ; i < 3 ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , label , i * blockRange + 1000 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
}
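// The two samples appended per iteration fall into the same block range, so
// after compaction each block should hold exactly one chunk that stays within
// the block's [MinTime, MaxTime] boundaries.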
err := app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
2023-09-13 08:45:06 -07:00
err = db . Compact ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
2022-06-28 08:03:26 -07:00
var builder labels . ScratchBuilder
2018-11-07 07:52:41 -08:00
for _ , block := range db . Blocks ( ) {
2020-03-25 12:13:47 -07:00
r , err := block . Index ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
defer r . Close ( )
meta := block . Meta ( )
2019-12-05 10:27:40 -08:00
k , v := index . AllPostingsKey ( )
2023-09-13 08:45:06 -07:00
p , err := r . Postings ( ctx , k , v )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
2022-12-15 10:19:15 -08:00
var chks [ ] chunks . Meta
2018-06-13 02:24:28 -07:00
chunkCount := 0
for p . Next ( ) {
2022-12-15 10:19:15 -08:00
err = r . Series ( p . At ( ) , & builder , & chks )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-13 02:24:28 -07:00
for _ , c := range chks {
2020-10-29 02:43:23 -07:00
require . True ( t , meta . MinTime <= c . MinTime && c . MaxTime <= meta . MaxTime ,
2018-06-13 02:24:28 -07:00
"chunk spans beyond block boundaries: [block.MinTime=%d, block.MaxTime=%d]; [chunk.MinTime=%d, chunk.MaxTime=%d]" ,
meta . MinTime , meta . MaxTime , c . MinTime , c . MaxTime )
chunkCount ++
}
}
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1 , chunkCount , "expected 1 chunk in block %s, got %d" , meta . ULID , chunkCount )
2018-06-13 02:24:28 -07:00
}
}
2018-06-14 06:29:32 -07:00
func TestQuerierWithBoundaryChunks ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2018-06-14 06:29:32 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2018-06-14 06:29:32 -07:00
2020-02-06 07:58:38 -08:00
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
2018-06-14 06:29:32 -07:00
label := labels . FromStrings ( "foo" , "bar" )
for i := int64 ( 0 ) ; i < 5 ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "blockID" , strconv . FormatInt ( i , 10 ) ) , i * blockRange , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-14 06:29:32 -07:00
}
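// Every sample above sits exactly on a multiple of blockRange, i.e. on a
// block boundary, which is the edge case this test exercises.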
err := app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-14 06:29:32 -07:00
2023-09-13 08:45:06 -07:00
err = db . Compact ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-14 06:29:32 -07:00
2020-10-29 02:43:23 -07:00
require . GreaterOrEqual ( t , len ( db . blocks ) , 3 , "invalid test, less than three blocks in DB" )
2018-06-14 06:29:32 -07:00
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( blockRange , 2 * blockRange )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-14 06:29:32 -07:00
defer q . Close ( )
2020-07-31 08:03:02 -07:00
// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
2023-09-14 07:02:04 -07:00
b , ws , err := q . LabelValues ( ctx , "blockID" )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-10-24 09:36:07 -07:00
var nilAnnotations annotations . Annotations
require . Equal ( t , nilAnnotations , ws )
2020-10-29 02:43:23 -07:00
require . Equal ( t , [ ] string { "1" , "2" } , b )
2018-06-14 06:29:32 -07:00
}
2018-05-25 14:19:32 -07:00
2018-12-04 02:30:49 -08:00
// TestInitializeHeadTimestamp ensures that the h.minTime is set properly.
2022-08-17 03:02:28 -07:00
// - no blocks no WAL: set to the time of the first appended sample
// - no blocks with WAL: set to the smallest sample from the WAL
// - with blocks no WAL: set to the last block maxT
// - with blocks with WAL: same as above
2018-05-25 14:19:32 -07:00
func TestInitializeHeadTimestamp ( t * testing . T ) {
t . Run ( "clean" , func ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2018-05-25 14:19:32 -07:00
2021-06-05 07:29:32 -07:00
db , err := Open ( dir , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-03-19 06:31:57 -07:00
defer db . Close ( )
2018-05-25 14:19:32 -07:00
// Should be set to init values if no WAL or blocks exist so far.
2020-10-29 02:43:23 -07:00
require . Equal ( t , int64 ( math . MaxInt64 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( math . MinInt64 ) , db . head . MaxTime ( ) )
2018-05-25 14:19:32 -07:00
// First added sample initializes the writable range.
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 1000 , 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-05-25 14:19:32 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , int64 ( 1000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 1000 ) , db . head . MaxTime ( ) )
2018-05-25 14:19:32 -07:00
} )
t . Run ( "wal-only" , func ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2018-05-25 14:19:32 -07:00
2021-10-22 01:06:44 -07:00
require . NoError ( t , os . MkdirAll ( path . Join ( dir , "wal" ) , 0 o777 ) )
2023-07-11 05:57:57 -07:00
w , err := wlog . New ( nil , nil , path . Join ( dir , "wal" ) , wlog . CompressionNone )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-05-25 14:19:32 -07:00
2019-09-19 02:15:41 -07:00
var enc record . Encoder
2018-05-25 14:19:32 -07:00
err = w . Log (
2019-09-19 02:15:41 -07:00
enc . Series ( [ ] record . RefSeries {
2018-05-25 14:19:32 -07:00
{ Ref : 123 , Labels : labels . FromStrings ( "a" , "1" ) } ,
{ Ref : 124 , Labels : labels . FromStrings ( "a" , "2" ) } ,
} , nil ) ,
2019-09-19 02:15:41 -07:00
enc . Samples ( [ ] record . RefSample {
2018-05-25 14:19:32 -07:00
{ Ref : 123 , T : 5000 , V : 1 } ,
{ Ref : 124 , T : 15000 , V : 1 } ,
} , nil ) ,
)
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , w . Close ( ) )
2018-05-25 14:19:32 -07:00
2021-06-05 07:29:32 -07:00
db , err := Open ( dir , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-03-19 06:31:57 -07:00
defer db . Close ( )
2018-05-25 14:19:32 -07:00
2020-10-29 02:43:23 -07:00
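// With no blocks on disk, the head boundaries must be initialized from the
// smallest and largest sample timestamps replayed from the WAL.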
require . Equal ( t , int64 ( 5000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 15000 ) , db . head . MaxTime ( ) )
2018-05-25 14:19:32 -07:00
} )
t . Run ( "existing-block" , func ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2018-05-25 14:19:32 -07:00
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , 1000 , 2000 ) )
2018-05-25 14:19:32 -07:00
2021-06-05 07:29:32 -07:00
db , err := Open ( dir , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-03-19 06:31:57 -07:00
defer db . Close ( )
2018-05-25 14:19:32 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , int64 ( 2000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 2000 ) , db . head . MaxTime ( ) )
2018-05-25 14:19:32 -07:00
} )
t . Run ( "existing-block-and-wal" , func ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2018-05-25 14:19:32 -07:00
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , 1000 , 6000 ) )
2018-05-25 14:19:32 -07:00
2021-10-22 01:06:44 -07:00
require . NoError ( t , os . MkdirAll ( path . Join ( dir , "wal" ) , 0 o777 ) )
2023-07-11 05:57:57 -07:00
w , err := wlog . New ( nil , nil , path . Join ( dir , "wal" ) , wlog . CompressionNone )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-05-25 14:19:32 -07:00
2019-09-19 02:15:41 -07:00
var enc record . Encoder
2018-05-25 14:19:32 -07:00
err = w . Log (
2019-09-19 02:15:41 -07:00
enc . Series ( [ ] record . RefSeries {
2018-05-25 14:19:32 -07:00
{ Ref : 123 , Labels : labels . FromStrings ( "a" , "1" ) } ,
{ Ref : 124 , Labels : labels . FromStrings ( "a" , "2" ) } ,
} , nil ) ,
2019-09-19 02:15:41 -07:00
enc . Samples ( [ ] record . RefSample {
2018-05-25 14:19:32 -07:00
{ Ref : 123 , T : 5000 , V : 1 } ,
{ Ref : 124 , T : 15000 , V : 1 } ,
} , nil ) ,
)
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , w . Close ( ) )
2018-05-25 14:19:32 -07:00
2018-11-28 01:23:50 -08:00
r := prometheus . NewRegistry ( )
2021-06-05 07:29:32 -07:00
db , err := Open ( dir , nil , r , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-03-19 06:31:57 -07:00
defer db . Close ( )
2018-05-25 14:19:32 -07:00
2020-10-29 02:43:23 -07:00
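// The block on disk already covers up to t=6000, so the WAL sample at 5000
// is skipped during replay and the head picks up from the block's maxt.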
require . Equal ( t , int64 ( 6000 ) , db . head . MinTime ( ) )
require . Equal ( t , int64 ( 15000 ) , db . head . MaxTime ( ) )
2018-11-28 01:23:50 -08:00
// Check that old series has been GCed.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . series ) )
2018-05-25 14:19:32 -07:00
} )
}
2018-09-27 04:43:22 -07:00
2019-01-18 00:35:16 -08:00
func TestNoEmptyBlocks ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , [ ] int64 { 100 } )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2019-01-18 00:35:16 -08:00
db . DisableCompactions ( )
2020-02-06 07:58:38 -08:00
rangeToTriggerCompaction := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] / 2 * 3 - 1
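// A head block is cut by Compact() only once the head spans roughly 1.5x the
// smallest block range; rangeToTriggerCompaction is sized so that the last
// append in each sub-test pushes the head just past that threshold.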
2019-01-18 00:35:16 -08:00
defaultLabel := labels . FromStrings ( "foo" , "bar" )
2019-11-18 11:53:33 -08:00
defaultMatcher := labels . MustNewMatcher ( labels . MatchRegexp , "" , ".*" )
2019-01-18 00:35:16 -08:00
t . Run ( "Test no blocks after compact with empty head." , func ( t * testing . T ) {
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2019-01-18 00:35:16 -08:00
actBlocks , err := blockDirs ( db . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , actBlocks )
2023-04-03 23:31:49 -07:00
require . Equal ( t , 0 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . Ran ) ) , "no compaction should be triggered here" )
2019-01-18 00:35:16 -08:00
} )
t . Run ( "Test no blocks after deleting all samples from head." , func ( t * testing . T ) {
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , defaultLabel , 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , 2 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , 3 + rangeToTriggerCompaction , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-04-03 23:31:49 -07:00
require . Equal ( t , 1 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . Ran ) ) , "compaction should have been triggered here" )
2019-01-18 00:35:16 -08:00
actBlocks , err := blockDirs ( db . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , actBlocks )
2019-01-18 00:35:16 -08:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , 1 , 0 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , storage . ErrOutOfBounds , err , "the head should be truncated so no samples in the past should be allowed" )
2019-01-18 00:35:16 -08:00
// Adding new blocks.
currentTime := db . Head ( ) . MaxTime ( )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , currentTime , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , currentTime + 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , currentTime + rangeToTriggerCompaction , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2019-01-18 00:35:16 -08:00
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-04-03 23:31:49 -07:00
require . Equal ( t , 2 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . Ran ) ) , "compaction should have been triggered here" )
2019-01-18 00:35:16 -08:00
actBlocks , err = blockDirs ( db . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
2023-12-07 03:35:01 -08:00
require . Len ( t , actBlocks , 1 , "No blocks created when compacting with >0 samples" )
2019-01-18 00:35:16 -08:00
} )
2019-03-25 16:38:12 -07:00
t . Run ( ` When no new block is created from head , and there are some blocks on disk
compaction should not run into infinite loop ( was seen during development ) . ` , func ( t * testing . T ) {
oldBlocks := db . Blocks ( )
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2019-01-18 00:35:16 -08:00
currentTime := db . Head ( ) . MaxTime ( )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , defaultLabel , currentTime , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , currentTime + 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , defaultLabel , currentTime + rangeToTriggerCompaction , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . head . Delete ( ctx , math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-04-03 23:31:49 -07:00
require . Equal ( t , 3 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . Ran ) ) , "compaction should have been triggered here" )
2020-10-29 02:43:23 -07:00
require . Equal ( t , oldBlocks , db . Blocks ( ) )
2019-01-18 00:35:16 -08:00
} )
t . Run ( "Test no blocks remaining after deleting all samples from disk." , func ( t * testing . T ) {
currentTime := db . Head ( ) . MaxTime ( )
blocks := [ ] * BlockMeta {
2020-02-06 07:58:38 -08:00
{ MinTime : currentTime , MaxTime : currentTime + db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] } ,
{ MinTime : currentTime + 100 , MaxTime : currentTime + 100 + db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] } ,
2019-01-18 00:35:16 -08:00
}
for _ , m := range blocks {
2019-01-28 03:24:49 -08:00
createBlock ( t , db . Dir ( ) , genSeries ( 2 , 2 , m . MinTime , m . MaxTime ) )
2019-01-18 00:35:16 -08:00
}
oldBlocks := db . Blocks ( )
2023-12-07 03:35:01 -08:00
require . NoError ( t , db . reloadBlocks ( ) ) // Reload the db to register the new blocks.
require . Len ( t , db . Blocks ( ) , len ( blocks ) + len ( oldBlocks ) ) // Ensure all blocks are registered.
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , math . MinInt64 , math . MaxInt64 , defaultMatcher ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-04-03 23:31:49 -07:00
require . Equal ( t , 5 , int ( prom_testutil . ToFloat64 ( db . compactor . ( * LeveledCompactor ) . metrics . Ran ) ) , "compaction should have been triggered here once for each block that has tombstones" )
2019-01-18 00:35:16 -08:00
actBlocks , err := blockDirs ( db . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , len ( db . Blocks ( ) ) , len ( actBlocks ) )
2023-12-07 03:35:01 -08:00
require . Len ( t , actBlocks , 1 , "All samples are deleted. Only the most recent block should remain after compaction." )
2019-01-18 00:35:16 -08:00
} )
}
2018-11-07 07:52:41 -08:00
func TestDB_LabelNames ( t * testing . T ) {
2023-09-14 01:39:51 -07:00
ctx := context . Background ( )
2018-11-07 07:52:41 -08:00
tests := [ ] struct {
// Add 'sampleLabels1' -> Test Head -> Compact -> Test Disk ->
// -> Add 'sampleLabels2' -> Test Head+Disk
sampleLabels1 [ ] [ 2 ] string // For checking head and disk separately.
// To test Head+Disk, sampleLabels2 should have
// at least 1 unique label name which is not in sampleLabels1.
sampleLabels2 [ ] [ 2 ] string // For checking head and disk together.
exp1 [ ] string // after adding sampleLabels1.
exp2 [ ] string // after adding sampleLabels1 and sampleLabels2.
} {
{
sampleLabels1 : [ ] [ 2 ] string {
2019-08-13 01:34:14 -07:00
{ "name1" , "1" } ,
{ "name3" , "3" } ,
{ "name2" , "2" } ,
2018-11-07 07:52:41 -08:00
} ,
sampleLabels2 : [ ] [ 2 ] string {
2019-08-13 01:34:14 -07:00
{ "name4" , "4" } ,
{ "name1" , "1" } ,
2018-11-07 07:52:41 -08:00
} ,
exp1 : [ ] string { "name1" , "name2" , "name3" } ,
exp2 : [ ] string { "name1" , "name2" , "name3" , "name4" } ,
} ,
{
sampleLabels1 : [ ] [ 2 ] string {
2019-08-13 01:34:14 -07:00
{ "name2" , "2" } ,
{ "name1" , "1" } ,
{ "name2" , "2" } ,
2018-11-07 07:52:41 -08:00
} ,
sampleLabels2 : [ ] [ 2 ] string {
2019-08-13 01:34:14 -07:00
{ "name6" , "6" } ,
{ "name0" , "0" } ,
2018-11-07 07:52:41 -08:00
} ,
exp1 : [ ] string { "name1" , "name2" } ,
exp2 : [ ] string { "name0" , "name1" , "name2" , "name6" } ,
} ,
}
2020-02-06 07:58:38 -08:00
blockRange := int64 ( 1000 )
2018-11-07 07:52:41 -08:00
// Appends samples into the database.
appendSamples := func ( db * DB , mint , maxt int64 , sampleLabels [ ] [ 2 ] string ) {
t . Helper ( )
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2018-11-07 07:52:41 -08:00
for i := mint ; i <= maxt ; i ++ {
for _ , tuple := range sampleLabels {
label := labels . FromStrings ( tuple [ 0 ] , tuple [ 1 ] )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , label , i * blockRange , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-11-07 07:52:41 -08:00
}
}
err := app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-11-07 07:52:41 -08:00
}
for _ , tst := range tests {
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2018-11-07 07:52:41 -08:00
appendSamples ( db , 0 , 4 , tst . sampleLabels1 )
// Testing head.
2020-03-25 12:13:47 -07:00
headIndexr , err := db . head . Index ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-09-14 01:39:51 -07:00
labelNames , err := headIndexr . LabelNames ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , tst . exp1 , labelNames )
require . NoError ( t , headIndexr . Close ( ) )
2018-11-07 07:52:41 -08:00
// Testing disk.
2023-09-13 08:45:06 -07:00
err = db . Compact ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-11-07 07:52:41 -08:00
// All blocks have the same label names, hence check them individually.
2019-12-08 11:16:46 -08:00
// No need to aggregate and check.
2018-11-07 07:52:41 -08:00
for _ , b := range db . Blocks ( ) {
2020-03-25 12:13:47 -07:00
blockIndexr , err := b . Index ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-09-14 01:39:51 -07:00
labelNames , err = blockIndexr . LabelNames ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , tst . exp1 , labelNames )
require . NoError ( t , blockIndexr . Close ( ) )
2018-11-07 07:52:41 -08:00
}
2019-12-08 11:16:46 -08:00
// Adding more samples to the head with new label names
2023-09-14 01:39:51 -07:00
// so that we can test (head+disk).LabelNames(ctx) (the union).
2018-11-07 07:52:41 -08:00
appendSamples ( db , 5 , 9 , tst . sampleLabels2 )
// Testing DB (union).
2023-09-12 03:37:38 -07:00
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-09-14 09:57:31 -07:00
var ws annotations . Annotations
2023-09-14 01:39:51 -07:00
labelNames , ws , err = q . LabelNames ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-10-29 02:43:23 -07:00
require . NoError ( t , q . Close ( ) )
require . Equal ( t , tst . exp2 , labelNames )
2018-11-07 07:52:41 -08:00
}
}
2018-09-27 04:43:22 -07:00
func TestCorrectNumTombstones ( t * testing . T ) {
2020-07-21 01:39:02 -07:00
db := openTestDB ( t , nil , nil )
2019-01-30 01:40:12 -08:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2019-01-30 01:40:12 -08:00
} ( )
2018-09-27 04:43:22 -07:00
2020-02-06 07:58:38 -08:00
blockRange := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ]
2022-03-09 14:17:29 -08:00
name , value := "foo" , "bar"
defaultLabel := labels . FromStrings ( name , value )
defaultMatcher := labels . MustNewMatcher ( labels . MatchEqual , name , value )
2018-09-27 04:43:22 -07:00
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2018-09-27 04:43:22 -07:00
for i := int64 ( 0 ) ; i < 3 ; i ++ {
for j := int64 ( 0 ) ; j < 15 ; j ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , defaultLabel , i * blockRange + j , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-09-27 04:43:22 -07:00
}
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2018-09-27 04:43:22 -07:00
2023-09-13 08:45:06 -07:00
err := db . Compact ( ctx )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Len ( t , db . blocks , 1 )
2018-09-27 04:43:22 -07:00
2023-09-13 06:43:06 -07:00
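// Tombstones are kept as per-series deletion intervals, and adjacent or
// overlapping intervals are merged when a new one is added; the
// NumTombstones counts below assert exactly that merging behaviour.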
require . NoError ( t , db . Delete ( ctx , 0 , 1 , defaultMatcher ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2018-09-27 04:43:22 -07:00
// {0, 1} and {2, 3} are merged to form 1 tombstone.
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , 2 , 3 , defaultMatcher ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 1 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2018-09-27 04:43:22 -07:00
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , 5 , 6 , defaultMatcher ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 2 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2018-09-27 04:43:22 -07:00
2023-09-13 06:43:06 -07:00
require . NoError ( t , db . Delete ( ctx , 9 , 11 , defaultMatcher ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , uint64 ( 3 ) , db . blocks [ 0 ] . meta . Stats . NumTombstones )
2018-09-27 04:43:22 -07:00
}
2018-12-04 02:30:49 -08:00
// TestBlockRanges checks the following use cases:
2022-08-17 03:02:28 -07:00
// - No samples can be added with timestamps lower than the last block maxt.
// - The compactor doesn't create overlapping blocks even when the last
//   block is not within the default boundaries.
// - The lower boundary is based on the smallest sample in the head and the
//   upper boundary is rounded up to the configured block range.
//
// This ensures that a snapshot that includes the head and creates a block with a custom time range
// will not overlap with the first block created by the next compaction.
func TestBlockRanges ( t * testing . T ) {
logger := log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
2018-12-04 02:30:49 -08:00
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2018-12-04 02:30:49 -08:00
// Test that the compactor doesn't create overlapping blocks
// when a non-standard block already exists.
firstBlockMaxT := int64 ( 3 )
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , 0 , firstBlockMaxT ) )
2021-06-05 07:29:32 -07:00
db , err := open ( dir , logger , nil , DefaultOptions ( ) , [ ] int64 { 10000 } , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-06 07:58:38 -08:00
rangeToTriggerCompaction := db . compactor . ( * LeveledCompactor ) . ranges [ 0 ] / 2 * 3 + 1
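// The head becomes compactable only once it covers roughly 1.5x the block
// range; the +1 makes sure the appends below cross that threshold.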
2022-01-22 01:55:01 -08:00
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2022-03-09 14:17:29 -08:00
lbl := labels . FromStrings ( "a" , "b" )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , firstBlockMaxT - 1 , rand . Float64 ( ) )
2018-12-04 02:30:49 -08:00
if err == nil {
t . Fatalf ( "appending a sample with a timestamp covered by a previous block shouldn't be possible" )
}
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , firstBlockMaxT + 1 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , firstBlockMaxT + 2 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-06 07:58:38 -08:00
secondBlockMaxt := firstBlockMaxT + rangeToTriggerCompaction
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , secondBlockMaxt , rand . Float64 ( ) ) // Add samples to trigger a new compaction
2018-12-04 02:30:49 -08:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2018-12-12 03:49:03 -08:00
for x := 0 ; x < 100 ; x ++ {
2018-12-04 02:30:49 -08:00
if len ( db . Blocks ( ) ) == 2 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 2 , "no new block created after the set timeout" )
2018-12-04 02:30:49 -08:00
if db . Blocks ( ) [ 0 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 1 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 0 ] . Meta ( ) , db . Blocks ( ) [ 1 ] . Meta ( ) )
}
// Test that WAL records are skipped when an existing block covers the same time ranges
// and compaction doesn't create an overlapping block.
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2018-12-04 02:30:49 -08:00
db . DisableCompactions ( )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 1 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 2 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 3 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , secondBlockMaxt + 4 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
require . NoError ( t , db . Close ( ) )
2018-12-04 02:30:49 -08:00
thirdBlockMaxt := secondBlockMaxt + 2
2019-01-28 03:24:49 -08:00
createBlock ( t , dir , genSeries ( 1 , 1 , secondBlockMaxt + 1 , thirdBlockMaxt ) )
2018-12-04 02:30:49 -08:00
2021-06-05 07:29:32 -07:00
db , err = open ( dir , logger , nil , DefaultOptions ( ) , [ ] int64 { 10000 } , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-06 07:58:38 -08:00
2018-12-04 02:30:49 -08:00
defer db . Close ( )
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 3 , "db doesn't include expected number of blocks" )
2020-10-29 02:43:23 -07:00
require . Equal ( t , db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime , thirdBlockMaxt , "unexpected maxt of the last block" )
2018-12-04 02:30:49 -08:00
2020-07-24 07:10:51 -07:00
app = db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbl , thirdBlockMaxt + rangeToTriggerCompaction , rand . Float64 ( ) ) // Trigger a compaction
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2018-12-12 03:49:03 -08:00
for x := 0 ; x < 100 ; x ++ {
2018-12-04 02:30:49 -08:00
if len ( db . Blocks ( ) ) == 4 {
break
}
time . Sleep ( 100 * time . Millisecond )
}
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 4 , "no new block created after the set timeout" )
2018-12-04 02:30:49 -08:00
if db . Blocks ( ) [ 2 ] . Meta ( ) . MaxTime > db . Blocks ( ) [ 3 ] . Meta ( ) . MinTime {
t . Fatalf ( "new block overlaps old:%v,new:%v" , db . Blocks ( ) [ 2 ] . Meta ( ) , db . Blocks ( ) [ 3 ] . Meta ( ) )
}
}
2019-07-23 01:04:48 -07:00
// TestDBReadOnly ensures that opening a DB in readonly mode doesn't modify any files on the disk.
// It also checks that the API calls return results equivalent to those of a DB opened in the normal db.Open() mode.
func TestDBReadOnly ( t * testing . T ) {
var (
2020-07-31 08:03:02 -07:00
dbDir string
logger = log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) )
expBlocks [ ] * Block
2023-06-01 04:43:09 -07:00
expBlock * Block
2023-08-24 06:21:17 -07:00
expSeries map [ string ] [ ] chunks . Sample
expChunks map [ string ] [ ] [ ] chunks . Sample
2020-07-31 08:03:02 -07:00
expDBHash [ ] byte
matchAll = labels . MustNewMatcher ( labels . MatchEqual , "" , "" )
err error
2019-07-23 01:04:48 -07:00
)
2019-10-10 02:47:30 -07:00
// Bootstrap the db.
2019-07-23 01:04:48 -07:00
{
2022-01-22 01:55:01 -08:00
dbDir = t . TempDir ( )
2019-07-23 01:04:48 -07:00
dbBlocks := [ ] * BlockMeta {
2020-07-31 08:03:02 -07:00
// Create three 2-sample blocks.
{ MinTime : 10 , MaxTime : 12 } ,
{ MinTime : 12 , MaxTime : 14 } ,
{ MinTime : 14 , MaxTime : 16 } ,
2019-07-23 01:04:48 -07:00
}
for _ , m := range dbBlocks {
2020-07-31 08:03:02 -07:00
_ = createBlock ( t , dbDir , genSeries ( 1 , 1 , m . MinTime , m . MaxTime ) )
2019-07-23 01:04:48 -07:00
}
2020-07-31 08:03:02 -07:00
// Add head to test DBReadOnly WAL reading capabilities.
2023-07-11 05:57:57 -07:00
w , err := wlog . New ( logger , nil , filepath . Join ( dbDir , "wal" ) , wlog . CompressionSnappy )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-31 08:03:02 -07:00
h := createHead ( t , w , genSeries ( 1 , 1 , 16 , 18 ) , dbDir )
2020-10-29 02:43:23 -07:00
require . NoError ( t , h . Close ( ) )
2019-07-23 01:04:48 -07:00
}
// Open a normal db to use for a comparison.
{
2021-06-05 07:29:32 -07:00
dbWritable , err := Open ( dbDir , logger , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-07-23 01:04:48 -07:00
dbWritable . DisableCompactions ( )
2019-11-11 18:40:16 -08:00
dbSizeBeforeAppend , err := fileutil . DirSize ( dbWritable . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-24 07:10:51 -07:00
app := dbWritable . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , dbWritable . Head ( ) . MaxTime ( ) + 1 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2019-07-23 01:04:48 -07:00
expBlocks = dbWritable . Blocks ( )
2023-06-01 04:43:09 -07:00
expBlock = expBlocks [ 0 ]
2019-11-11 18:40:16 -08:00
expDbSize , err := fileutil . DirSize ( dbWritable . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Greater ( t , expDbSize , dbSizeBeforeAppend , "db size didn't increase after an append" )
2019-07-23 01:04:48 -07:00
2023-09-12 03:37:38 -07:00
q , err := dbWritable . Querier ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-07-23 01:04:48 -07:00
expSeries = query ( t , q , matchAll )
2023-09-12 03:37:38 -07:00
cq , err := dbWritable . ChunkQuerier ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-02-21 00:32:59 -08:00
expChunks = queryAndExpandChunks ( t , cq , matchAll )
2019-07-23 01:04:48 -07:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , dbWritable . Close ( ) ) // Close here to allow getting the dir hash for Windows.
2019-07-23 01:04:48 -07:00
expDBHash = testutil . DirHash ( t , dbWritable . Dir ( ) )
}
// Open a read only db and ensure that the API returns the same result as the normal DB.
2020-07-31 08:03:02 -07:00
dbReadOnly , err := OpenDBReadOnly ( dbDir , logger )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , dbReadOnly . Close ( ) ) } ( )
2020-07-31 08:03:02 -07:00
t . Run ( "blocks" , func ( t * testing . T ) {
2019-07-23 01:04:48 -07:00
blocks , err := dbReadOnly . Blocks ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , len ( expBlocks ) , len ( blocks ) )
2019-07-23 01:04:48 -07:00
for i , expBlock := range expBlocks {
2020-10-29 02:43:23 -07:00
require . Equal ( t , expBlock . Meta ( ) , blocks [ i ] . Meta ( ) , "block meta mismatch" )
2019-07-23 01:04:48 -07:00
}
2020-07-31 08:03:02 -07:00
} )
2023-06-01 04:43:09 -07:00
t . Run ( "block" , func ( t * testing . T ) {
blockID := expBlock . meta . ULID . String ( )
block , err := dbReadOnly . Block ( blockID )
require . NoError ( t , err )
require . Equal ( t , expBlock . Meta ( ) , block . Meta ( ) , "block meta mismatch" )
} )
t . Run ( "invalid block ID" , func ( t * testing . T ) {
blockID := "01GTDVZZF52NSWB5SXQF0P2PGF"
_ , err := dbReadOnly . Block ( blockID )
require . Error ( t , err )
} )
t . Run ( "last block ID" , func ( t * testing . T ) {
blockID , err := dbReadOnly . LastBlockID ( )
require . NoError ( t , err )
require . Equal ( t , expBlocks [ 2 ] . Meta ( ) . ULID . String ( ) , blockID )
} )
2020-07-31 08:03:02 -07:00
t . Run ( "querier" , func ( t * testing . T ) {
// Open a read only db and ensure that the API returns the same result as the normal DB.
2023-09-12 03:37:38 -07:00
q , err := dbReadOnly . Querier ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-07-23 01:04:48 -07:00
readOnlySeries := query ( t , q , matchAll )
readOnlyDBHash := testutil . DirHash ( t , dbDir )
2020-10-29 02:43:23 -07:00
require . Equal ( t , len ( expSeries ) , len ( readOnlySeries ) , "total series mismatch" )
require . Equal ( t , expSeries , readOnlySeries , "series mismatch" )
require . Equal ( t , expDBHash , readOnlyDBHash , "after all read operations the db hash should remain the same" )
2020-07-31 08:03:02 -07:00
} )
t . Run ( "chunk querier" , func ( t * testing . T ) {
2023-09-12 03:37:38 -07:00
cq , err := dbReadOnly . ChunkQuerier ( math . MinInt64 , math . MaxInt64 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-02-21 00:32:59 -08:00
readOnlySeries := queryAndExpandChunks ( t , cq , matchAll )
2020-07-31 08:03:02 -07:00
readOnlyDBHash := testutil . DirHash ( t , dbDir )
2020-10-29 02:43:23 -07:00
require . Equal ( t , len ( expChunks ) , len ( readOnlySeries ) , "total series mismatch" )
require . Equal ( t , expChunks , readOnlySeries , "series chunks mismatch" )
require . Equal ( t , expDBHash , readOnlyDBHash , "after all read operations the db hash should remain the same" )
2020-07-31 08:03:02 -07:00
} )
2019-07-23 01:04:48 -07:00
}
// TestDBReadOnlyClosing ensures that after closing the db
// all API methods return ErrClosed.
func TestDBReadOnlyClosing ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dbDir := t . TempDir ( )
2019-07-23 01:04:48 -07:00
db , err := OpenDBReadOnly ( dbDir , log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , db . Close ( ) )
require . Equal ( t , db . Close ( ) , ErrClosed )
2019-07-23 01:04:48 -07:00
_ , err = db . Blocks ( )
2020-10-29 02:43:23 -07:00
require . Equal ( t , err , ErrClosed )
2023-09-12 03:37:38 -07:00
_ , err = db . Querier ( 0 , 1 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , err , ErrClosed )
2019-07-23 01:04:48 -07:00
}
2019-09-13 03:25:21 -07:00
func TestDBReadOnly_FlushWAL ( t * testing . T ) {
var (
dbDir string
logger = log . NewLogfmtLogger ( log . NewSyncWriter ( os . Stderr ) )
err error
maxt int
2020-07-24 08:10:13 -07:00
ctx = context . Background ( )
2019-09-13 03:25:21 -07:00
)
2019-10-10 02:47:30 -07:00
// Bootstrap the db.
2019-09-13 03:25:21 -07:00
{
2022-01-22 01:55:01 -08:00
dbDir = t . TempDir ( )
2019-09-13 03:25:21 -07:00
// Append data to the WAL.
2021-06-05 07:29:32 -07:00
db , err := Open ( dbDir , logger , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-09-13 03:25:21 -07:00
db . DisableCompactions ( )
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2019-09-13 03:25:21 -07:00
maxt = 1000
for i := 0 ; i < maxt ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( defaultLabelName , "flush" ) , int64 ( i ) , 1.0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-09-13 03:25:21 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2022-01-10 05:36:45 -08:00
require . NoError ( t , db . Close ( ) )
2019-09-13 03:25:21 -07:00
}
// Flush WAL.
db , err := OpenDBReadOnly ( dbDir , logger )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-09-13 03:25:21 -07:00
2022-01-22 01:55:01 -08:00
flush := t . TempDir ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . FlushWAL ( flush ) )
require . NoError ( t , db . Close ( ) )
2019-09-13 03:25:21 -07:00
// Reopen the DB from the flushed WAL block.
db , err = OpenDBReadOnly ( flush , logger )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , db . Close ( ) ) } ( )
2019-09-13 03:25:21 -07:00
blocks , err := db . Blocks ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Len ( t , blocks , 1 )
2019-09-13 03:25:21 -07:00
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , int64 ( maxt ) - 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , querier . Close ( ) ) } ( )
2019-09-13 03:25:21 -07:00
// Sum the values.
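// maxt samples of value 1.0 were appended before the flush, so summing the
// series across the flushed block must yield exactly float64(maxt).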
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , defaultLabelName , "flush" ) )
2022-09-20 10:16:45 -07:00
var series chunkenc . Iterator
2019-09-13 03:25:21 -07:00
sum := 0.0
for seriesSet . Next ( ) {
2022-09-20 10:16:45 -07:00
series = seriesSet . At ( ) . Iterator ( series )
2021-11-28 23:54:23 -08:00
for series . Next ( ) == chunkenc . ValFloat {
2019-09-13 03:25:21 -07:00
_ , v := series . At ( )
sum += v
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , series . Err ( ) )
2019-09-13 03:25:21 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , seriesSet . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , seriesSet . Warnings ( ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1000.0 , sum )
2019-09-13 03:25:21 -07:00
}
2019-12-03 23:37:49 -08:00
2020-02-12 11:22:27 -08:00
func TestDBCannotSeePartialCommits ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2022-01-22 01:55:01 -08:00
tmpdir := t . TempDir ( )
2020-02-12 11:22:27 -08:00
2021-06-05 07:29:32 -07:00
db , err := Open ( tmpdir , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer db . Close ( )
stop := make ( chan struct { } )
firstInsert := make ( chan struct { } )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
2020-02-12 11:22:27 -08:00
// Insert data in batches.
go func ( ) {
iter := 0
for {
2020-07-24 07:10:51 -07:00
app := db . Appender ( ctx )
2020-02-12 11:22:27 -08:00
for j := 0 ; j < 100 ; j ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "foo" , "bar" , "a" , strconv . Itoa ( j ) ) , int64 ( iter ) , float64 ( iter ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
}
err = app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
if iter == 0 {
close ( firstInsert )
}
iter ++
select {
case <- stop :
return
default :
}
}
} ( )
<- firstInsert
// This is a race condition, so query a few times to tickle it.
// Without isolation, most of these queries would observe a partial commit.
inconsistencies := 0
for i := 0 ; i < 10 ; i ++ {
func ( ) {
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , 1000000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer querier . Close ( )
2023-09-12 03:37:38 -07:00
ss := querier . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err := expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-02-12 11:22:27 -08:00
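// Each committed batch writes the same value (iter) to all 100 series, so a
// consistent snapshot must observe exactly one distinct last value across them.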
values := map [ float64 ] struct { } { }
for _ , series := range seriesSet {
2023-03-30 10:50:13 -07:00
values [ series [ len ( series ) - 1 ] . f ] = struct { } { }
2020-02-12 11:22:27 -08:00
}
if len ( values ) != 1 {
inconsistencies ++
}
} ( )
}
stop <- struct { } { }
2020-10-29 02:43:23 -07:00
require . Equal ( t , 0 , inconsistencies , "Some queries saw inconsistent results." )
2020-02-12 11:22:27 -08:00
}
func TestDBQueryDoesntSeeAppendsAfterCreation ( t * testing . T ) {
2021-11-19 02:11:32 -08:00
if defaultIsolationDisabled {
t . Skip ( "skipping test since tsdb isolation is disabled" )
}
2022-01-22 01:55:01 -08:00
tmpdir := t . TempDir ( )
2020-02-12 11:22:27 -08:00
2021-06-05 07:29:32 -07:00
db , err := Open ( tmpdir , nil , nil , nil , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer db . Close ( )
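// Under isolation a querier only observes appends committed before it was
// created; the queriers below bracket the Append and Commit calls to check
// each combination.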
2023-09-12 03:37:38 -07:00
querierBeforeAdd , err := db . Querier ( 0 , 1000000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer querierBeforeAdd . Close ( )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "foo" , "bar" ) , 0 , 0 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
2023-09-12 03:37:38 -07:00
querierAfterAddButBeforeCommit , err := db . Querier ( 0 , 1000000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer querierAfterAddButBeforeCommit . Close ( )
// None of the queriers should return anything after the Add but before the commit.
2023-09-12 03:37:38 -07:00
ss := querierBeforeAdd . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err := expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-10-29 02:43:23 -07:00
require . Equal ( t , map [ string ] [ ] sample { } , seriesSet )
2020-02-12 11:22:27 -08:00
2023-09-12 03:37:38 -07:00
ss = querierAfterAddButBeforeCommit . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err = expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-10-29 02:43:23 -07:00
require . Equal ( t , map [ string ] [ ] sample { } , seriesSet )
2020-02-12 11:22:27 -08:00
// This commit is after the queriers are created, so should not be returned.
err = app . Commit ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
// Nothing returned for querier created before the Add.
2023-09-12 03:37:38 -07:00
ss = querierBeforeAdd . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err = expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-10-29 02:43:23 -07:00
require . Equal ( t , map [ string ] [ ] sample { } , seriesSet )
2020-02-12 11:22:27 -08:00
// Series exists but has no samples for querier created after Add.
2023-09-12 03:37:38 -07:00
ss = querierAfterAddButBeforeCommit . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err = expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2020-10-29 02:43:23 -07:00
require . Equal ( t , map [ string ] [ ] sample { ` { foo="bar"} ` : { } } , seriesSet )
2020-02-12 11:22:27 -08:00
2023-09-12 03:37:38 -07:00
querierAfterCommit , err := db . Querier ( 0 , 1000000 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-02-12 11:22:27 -08:00
defer querierAfterCommit . Close ( )
// Samples are returned for querier created after Commit.
2023-09-12 03:37:38 -07:00
ss = querierAfterCommit . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar" ) )
2020-06-09 09:57:31 -07:00
_ , seriesSet , ws , err = expandSeriesSet ( ss )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Empty ( t , ws )
2023-03-30 10:50:13 -07:00
require . Equal ( t , map [ string ] [ ] sample { ` { foo="bar"} ` : { { t : 0 , f : 0 } } } , seriesSet )
2020-02-12 11:22:27 -08:00
}
2023-08-24 06:21:17 -07:00
func assureChunkFromSamples ( t * testing . T , samples [ ] chunks . Sample ) chunks . Meta {
chks , err := chunks . ChunkFromSamples ( samples )
2023-07-20 08:01:34 -07:00
require . NoError ( t , err )
return chks
}
2019-12-24 13:55:22 -08:00
// TestChunkWriter_ReadAfterWrite ensures that chunk segments are cut at the set segment size and
2019-12-03 23:37:49 -08:00
// that the resulting segments include the expected chunk data.
2019-12-24 13:55:22 -08:00
func TestChunkWriter_ReadAfterWrite ( t * testing . T ) {
2023-08-24 06:21:17 -07:00
chk1 := assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 1 , nil , nil } } )
chk2 := assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 2 , nil , nil } } )
chk3 := assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 3 , nil , nil } } )
chk4 := assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 4 , nil , nil } } )
chk5 := assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 5 , nil , nil } } )
2019-12-03 23:37:49 -08:00
chunkSize := len ( chk1 . Chunk . Bytes ( ) ) + chunks . MaxChunkLengthFieldSize + chunks . ChunkEncodingSize + crc32 . Size
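// On disk each chunk is stored as a uvarint data length, one encoding byte,
// the chunk data and a 4-byte CRC32; chunkSize is the worst-case footprint
// of chk1 under that layout (the length field budgeted at its maximum width).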
tests := [ ] struct {
chks [ ] [ ] chunks . Meta
segmentSize ,
expSegmentsCount int
expSegmentSizes [ ] int
} {
// 0: Last chunk ends at the segment boundary so
// all chunks should fit in a single segment.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
chk2 ,
chk3 ,
} ,
} ,
segmentSize : 3 * chunkSize ,
expSegmentSizes : [ ] int { 3 * chunkSize } ,
expSegmentsCount : 1 ,
} ,
// 1: Two chunks can fit in a single segment, so the last one should result in a new segment.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
chk2 ,
chk3 ,
chk4 ,
chk5 ,
} ,
} ,
segmentSize : 2 * chunkSize ,
expSegmentSizes : [ ] int { 2 * chunkSize , 2 * chunkSize , chunkSize } ,
expSegmentsCount : 3 ,
} ,
// 2: When the segment size is smaller than the size of 2 chunks,
// each chunk should still be written to its own new segment.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
chk2 ,
chk3 ,
} ,
} ,
segmentSize : 2 * chunkSize - 1 ,
expSegmentSizes : [ ] int { chunkSize , chunkSize , chunkSize } ,
expSegmentsCount : 3 ,
} ,
// 3: When the segment size is smaller than a single chunk,
// the chunk should still be written, ignoring the max segment size.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
} ,
} ,
segmentSize : chunkSize - 1 ,
expSegmentSizes : [ ] int { chunkSize } ,
expSegmentsCount : 1 ,
} ,
// 4: All chunks are bigger than the max segment size, but
// they should still be written even though this results in segments bigger than the set size.
// Each segment will hold a single chunk.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
chk2 ,
chk3 ,
} ,
} ,
segmentSize : 1 ,
expSegmentSizes : [ ] int { chunkSize , chunkSize , chunkSize } ,
expSegmentsCount : 3 ,
} ,
// 5: Adding multiple batches of chunks.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
chk2 ,
chk3 ,
} ,
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk4 ,
chk5 ,
} ,
} ,
segmentSize : 3 * chunkSize ,
expSegmentSizes : [ ] int { 3 * chunkSize , 2 * chunkSize } ,
expSegmentsCount : 2 ,
} ,
// 6: Adding multiple batches of chunks.
{
chks : [ ] [ ] chunks . Meta {
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk1 ,
} ,
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk2 ,
chk3 ,
} ,
2020-04-15 03:17:41 -07:00
{
2019-12-03 23:37:49 -08:00
chk4 ,
} ,
} ,
segmentSize : 2 * chunkSize ,
expSegmentSizes : [ ] int { 2 * chunkSize , 2 * chunkSize } ,
expSegmentsCount : 2 ,
} ,
}
for i , test := range tests {
t . Run ( strconv . Itoa ( i ) , func ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
tempDir := t . TempDir ( )
2019-12-03 23:37:49 -08:00
2019-12-24 13:55:22 -08:00
chunkw , err := chunks . NewWriterWithSegSize ( tempDir , chunks . SegmentHeaderSize + int64 ( test . segmentSize ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
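// Every segment file begins with SegmentHeaderSize bytes of header, so the
// writer's size limit is the header plus test.segmentSize bytes of chunk payload.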
2019-12-03 23:37:49 -08:00
for _ , chks := range test . chks {
2020-10-29 02:43:23 -07:00
require . NoError ( t , chunkw . WriteChunks ( chks ... ) )
2019-12-03 23:37:49 -08:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , chunkw . Close ( ) )
2019-12-03 23:37:49 -08:00
2022-04-27 02:24:36 -07:00
files , err := os . ReadDir ( tempDir )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Len ( t , files , test . expSegmentsCount , "expected segments count mismatch" )
2019-12-03 23:37:49 -08:00
// Verify that all data is written to the segments.
sizeExp := 0
sizeAct := 0
for _ , chks := range test . chks {
for _ , chk := range chks {
l := make ( [ ] byte , binary . MaxVarintLen32 )
sizeExp += binary . PutUvarint ( l , uint64 ( len ( chk . Chunk . Bytes ( ) ) ) ) // The length field.
sizeExp += chunks . ChunkEncodingSize
sizeExp += len ( chk . Chunk . Bytes ( ) ) // The data itself.
sizeExp += crc32 . Size // The 4 bytes of crc32.
}
}
sizeExp += test . expSegmentsCount * chunks . SegmentHeaderSize // The segment header bytes.
for i , f := range files {
2022-04-27 02:24:36 -07:00
fi , err := f . Info ( )
require . NoError ( t , err )
size := int ( fi . Size ( ) )
2019-12-03 23:37:49 -08:00
// Verify that the segment is the same or smaller than the expected size.
2020-10-29 02:43:23 -07:00
require . GreaterOrEqual ( t , chunks . SegmentHeaderSize + test . expSegmentSizes [ i ] , size , "Segment:%v should NOT be bigger than:%v actual:%v" , i , chunks . SegmentHeaderSize + test . expSegmentSizes [ i ] , size )
2019-12-03 23:37:49 -08:00
sizeAct += size
}
2020-10-29 02:43:23 -07:00
require . Equal ( t , sizeExp , sizeAct )
2019-12-03 23:37:49 -08:00
// Check the content of the chunks.
2019-12-24 13:55:22 -08:00
r , err := chunks . NewDirReader ( tempDir , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , r . Close ( ) ) } ( )
2019-12-03 23:37:49 -08:00
for _ , chks := range test . chks {
for _ , chkExp := range chks {
2023-11-28 02:14:29 -08:00
chkAct , iterable , err := r . ChunkOrIterable ( chkExp )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-11-28 02:14:29 -08:00
require . Nil ( t , iterable )
2020-10-29 02:43:23 -07:00
require . Equal ( t , chkExp . Chunk . Bytes ( ) , chkAct . Bytes ( ) )
2019-12-03 23:37:49 -08:00
}
}
} )
}
}
2019-12-24 13:55:22 -08:00
2020-08-25 08:16:43 -07:00
func TestRangeForTimestamp ( t * testing . T ) {
type args struct {
t int64
width int64
}
tests := [ ] struct {
args args
expected int64
} {
{ args { 0 , 5 } , 5 } ,
{ args { 1 , 5 } , 5 } ,
{ args { 5 , 5 } , 10 } ,
{ args { 6 , 5 } , 10 } ,
{ args { 13 , 5 } , 15 } ,
{ args { 95 , 5 } , 100 } ,
}
for _ , tt := range tests {
got := rangeForTimestamp ( tt . args . t , tt . args . width )
2020-10-29 02:43:23 -07:00
require . Equal ( t , tt . expected , got )
2020-08-25 08:16:43 -07:00
}
}
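// rangeForTimestampSketch is an illustrative sketch (an assumption, not the
// actual implementation) of the behavior the table above encodes:
// rangeForTimestamp returns the exclusive end of the width-aligned window
// containing t.
func rangeForTimestampSketch ( t , width int64 ) int64 {
	return ( t / width ) * width + width
}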
2019-12-24 13:55:22 -08:00
// TestChunkReader_ConcurrentReads checks that the chunk result can be read concurrently.
// Regression test for https://github.com/prometheus/prometheus/pull/6514.
func TestChunkReader_ConcurrentReads ( t * testing . T ) {
chks := [ ] chunks . Meta {
2023-08-24 06:21:17 -07:00
assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 1 , nil , nil } } ) ,
assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 2 , nil , nil } } ) ,
assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 3 , nil , nil } } ) ,
assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 4 , nil , nil } } ) ,
assureChunkFromSamples ( t , [ ] chunks . Sample { sample { 1 , 5 , nil , nil } } ) ,
2019-12-24 13:55:22 -08:00
}
2022-01-22 01:55:01 -08:00
tempDir := t . TempDir ( )
2019-12-24 13:55:22 -08:00
chunkw , err := chunks . NewWriter ( tempDir )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-12-24 13:55:22 -08:00
2020-10-29 02:43:23 -07:00
require . NoError ( t , chunkw . WriteChunks ( chks ... ) )
require . NoError ( t , chunkw . Close ( ) )
2019-12-24 13:55:22 -08:00
r , err := chunks . NewDirReader ( tempDir , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2019-12-24 13:55:22 -08:00
var wg sync . WaitGroup
for _ , chk := range chks {
for i := 0 ; i < 100 ; i ++ {
wg . Add ( 1 )
go func ( chunk chunks . Meta ) {
defer wg . Done ( )
2023-11-28 02:14:29 -08:00
chkAct , iterable , err := r . ChunkOrIterable ( chunk )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-11-28 02:14:29 -08:00
require . Nil ( t , iterable )
2020-10-29 02:43:23 -07:00
require . Equal ( t , chunk . Chunk . Bytes ( ) , chkAct . Bytes ( ) )
2019-12-24 13:55:22 -08:00
} ( chk )
}
wg . Wait ( )
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , r . Close ( ) )
2019-12-24 13:55:22 -08:00
}
2020-06-12 03:29:26 -07:00
// TestCompactHead ensures that the head compaction
// creates a block that is ready for loading and
// does not cause data loss.
// This test:
// * opens a storage;
// * appends values;
// * compacts the head; and
// * queries the db to ensure the samples are present from the compacted head.
func TestCompactHead ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
dbDir := t . TempDir ( )
2020-06-12 03:29:26 -07:00
// Open a DB and append data to the WAL.
tsdbCfg := & Options {
RetentionDuration : int64 ( time . Hour * 24 * 15 / time . Millisecond ) ,
NoLockfile : true ,
MinBlockDuration : int64 ( time . Hour * 2 / time . Millisecond ) ,
MaxBlockDuration : int64 ( time . Hour * 2 / time . Millisecond ) ,
2023-07-11 05:57:57 -07:00
WALCompression : wlog . CompressionSnappy ,
2020-06-12 03:29:26 -07:00
}
2021-06-05 07:29:32 -07:00
db , err := Open ( dbDir , log . NewNopLogger ( ) , prometheus . NewRegistry ( ) , tsdbCfg , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-07-24 07:10:51 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2020-06-12 03:29:26 -07:00
var expSamples [ ] sample
maxt := 100
for i := 0 ; i < maxt ; i ++ {
val := rand . Float64 ( )
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , int64 ( i ) , val )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-11-28 23:54:23 -08:00
expSamples = append ( expSamples , sample { int64 ( i ) , val , nil , nil } )
2020-06-12 03:29:26 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-06-12 03:29:26 -07:00
// Compact the Head to create a new block.
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . CompactHead ( NewRangeHead ( db . Head ( ) , 0 , int64 ( maxt ) - 1 ) ) )
require . NoError ( t , db . Close ( ) )
2020-06-12 03:29:26 -07:00
// Delete everything but the new block and
// reopen the db to query it to ensure it includes the head data.
2020-10-29 02:43:23 -07:00
require . NoError ( t , deleteNonBlocks ( db . Dir ( ) ) )
2021-06-05 07:29:32 -07:00
db , err = Open ( dbDir , log . NewNopLogger ( ) , prometheus . NewRegistry ( ) , tsdbCfg , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 1 )
2020-10-29 02:43:23 -07:00
require . Equal ( t , int64 ( maxt ) , db . Head ( ) . MinTime ( ) )
defer func ( ) { require . NoError ( t , db . Close ( ) ) } ( )
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , int64 ( maxt ) - 1 )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
defer func ( ) { require . NoError ( t , querier . Close ( ) ) } ( )
2020-06-12 03:29:26 -07:00
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( ctx , false , nil , & labels . Matcher { Type : labels . MatchEqual , Name : "a" , Value : "b" } )
2022-09-20 10:16:45 -07:00
var series chunkenc . Iterator
2020-06-12 03:29:26 -07:00
var actSamples [ ] sample
for seriesSet . Next ( ) {
2022-09-20 10:16:45 -07:00
series = seriesSet . At ( ) . Iterator ( series )
2021-11-28 23:54:23 -08:00
for series . Next ( ) == chunkenc . ValFloat {
2020-06-12 03:29:26 -07:00
time , val := series . At ( )
2023-04-09 00:08:40 -07:00
actSamples = append ( actSamples , sample { time , val , nil , nil } )
2020-06-12 03:29:26 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , series . Err ( ) )
2020-06-12 03:29:26 -07:00
}
2020-10-29 02:43:23 -07:00
require . Equal ( t , expSamples , actSamples )
require . NoError ( t , seriesSet . Err ( ) )
2020-06-12 03:29:26 -07:00
}
2022-11-23 04:01:18 -08:00
// TestCompactHeadWithDeletion tests https://github.com/prometheus/prometheus/issues/11585.
func TestCompactHeadWithDeletion ( t * testing . T ) {
db , err := Open ( t . TempDir ( ) , log . NewNopLogger ( ) , prometheus . NewRegistry ( ) , nil , nil )
require . NoError ( t , err )
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
app := db . Appender ( ctx )
2022-11-23 04:01:18 -08:00
_ , err = app . Append ( 0 , labels . FromStrings ( "a" , "b" ) , 10 , rand . Float64 ( ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2023-09-13 08:45:06 -07:00
err = db . Delete ( ctx , 0 , 100 , labels . MustNewMatcher ( labels . MatchEqual , "a" , "b" ) )
2022-11-23 04:01:18 -08:00
require . NoError ( t , err )
// This recreates the bug.
require . NoError ( t , db . CompactHead ( NewRangeHead ( db . Head ( ) , 0 , 100 ) ) )
require . NoError ( t , db . Close ( ) )
}
2020-06-12 03:29:26 -07:00
func deleteNonBlocks ( dbDir string ) error {
2022-04-27 02:24:36 -07:00
dirs , err := os . ReadDir ( dbDir )
2020-06-12 03:29:26 -07:00
if err != nil {
return err
}
for _ , dir := range dirs {
if ok := isBlockDir ( dir ) ; ! ok {
if err := os . RemoveAll ( filepath . Join ( dbDir , dir . Name ( ) ) ) ; err != nil {
return err
}
}
}
2022-04-27 02:24:36 -07:00
dirs , err = os . ReadDir ( dbDir )
2020-06-12 03:29:26 -07:00
if err != nil {
return err
}
for _ , dir := range dirs {
if ok := isBlockDir ( dir ) ; ! ok {
2023-11-14 05:04:31 -08:00
return fmt . Errorf ( "root folder:%v still has non-block directory:%v" , dbDir , dir . Name ( ) )
2020-06-12 03:29:26 -07:00
}
}
return nil
}
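// isBlockDirSketch is an illustrative sketch (an assumption, not the actual
// implementation) of the check deleteNonBlocks relies on: a directory is
// treated as a block directory when its name parses as a valid ULID.
func isBlockDirSketch ( e os . DirEntry ) bool {
	if ! e . IsDir ( ) {
		return false
	}
	_ , err := ulid . ParseStrict ( e . Name ( ) )
	return err == nil
}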
2020-08-10 22:56:08 -07:00
func TestOpen_VariousBlockStates ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
tmpDir := t . TempDir ( )
2020-08-10 22:56:08 -07:00
var (
expectedLoadedDirs = map [ string ] struct { } { }
expectedRemovedDirs = map [ string ] struct { } { }
expectedIgnoredDirs = map [ string ] struct { } { }
)
{
// Ok blocks; should be loaded.
expectedLoadedDirs [ createBlock ( t , tmpDir , genSeries ( 10 , 2 , 0 , 10 ) ) ] = struct { } { }
expectedLoadedDirs [ createBlock ( t , tmpDir , genSeries ( 10 , 2 , 10 , 20 ) ) ] = struct { } { }
}
{
// Block to repair; should be repaired & loaded.
dbDir := filepath . Join ( "testdata" , "repair_index_version" , "01BZJ9WJQPWHGNC2W4J9TA62KC" )
outDir := filepath . Join ( tmpDir , "01BZJ9WJQPWHGNC2W4J9TA62KC" )
expectedLoadedDirs [ outDir ] = struct { } { }
// Touch chunks dir in block.
2021-10-22 01:06:44 -07:00
require . NoError ( t , os . MkdirAll ( filepath . Join ( dbDir , "chunks" ) , 0 o777 ) )
2020-08-10 22:56:08 -07:00
defer func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , os . RemoveAll ( filepath . Join ( dbDir , "chunks" ) ) )
2020-08-10 22:56:08 -07:00
} ( )
2020-10-29 02:43:23 -07:00
require . NoError ( t , os . Mkdir ( outDir , os . ModePerm ) )
require . NoError ( t , fileutil . CopyDirs ( dbDir , outDir ) )
2020-08-10 22:56:08 -07:00
}
{
// Missing meta.json; should be ignored and only logged.
// TODO(bwplotka): Probably add metric.
dir := createBlock ( t , tmpDir , genSeries ( 10 , 2 , 20 , 30 ) )
expectedIgnoredDirs [ dir ] = struct { } { }
2020-10-29 02:43:23 -07:00
require . NoError ( t , os . Remove ( filepath . Join ( dir , metaFilename ) ) )
2020-08-10 22:56:08 -07:00
}
{
2021-01-09 01:02:26 -08:00
// Tmp blocks during creation; those should be removed on start.
2020-08-10 22:56:08 -07:00
dir := createBlock ( t , tmpDir , genSeries ( 10 , 2 , 30 , 40 ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , fileutil . Replace ( dir , dir + tmpForCreationBlockDirSuffix ) )
2020-08-10 22:56:08 -07:00
expectedRemovedDirs [ dir + tmpForCreationBlockDirSuffix ] = struct { } { }
2021-01-09 01:02:26 -08:00
// Tmp blocks during deletion; those should be removed on start.
2020-08-10 22:56:08 -07:00
dir = createBlock ( t , tmpDir , genSeries ( 10 , 2 , 40 , 50 ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , fileutil . Replace ( dir , dir + tmpForDeletionBlockDirSuffix ) )
2020-08-10 22:56:08 -07:00
expectedRemovedDirs [ dir + tmpForDeletionBlockDirSuffix ] = struct { } { }
2021-01-09 01:02:26 -08:00
// Pre-2.21 tmp blocks; those should be removed on start.
dir = createBlock ( t , tmpDir , genSeries ( 10 , 2 , 50 , 60 ) )
require . NoError ( t , fileutil . Replace ( dir , dir + tmpLegacy ) )
expectedRemovedDirs [ dir + tmpLegacy ] = struct { } { }
2020-08-10 22:56:08 -07:00
}
2020-08-11 07:53:23 -07:00
{
// One ok block; but two should be replaced.
dir := createBlock ( t , tmpDir , genSeries ( 10 , 2 , 50 , 60 ) )
expectedLoadedDirs [ dir ] = struct { } { }
m , _ , err := readMetaFile ( dir )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-08-11 07:53:23 -07:00
compacted := createBlock ( t , tmpDir , genSeries ( 10 , 2 , 50 , 55 ) )
expectedRemovedDirs [ compacted ] = struct { } { }
m . Compaction . Parents = append ( m . Compaction . Parents ,
BlockDesc { ULID : ulid . MustParse ( filepath . Base ( compacted ) ) } ,
BlockDesc { ULID : ulid . MustNew ( 1 , nil ) } ,
BlockDesc { ULID : ulid . MustNew ( 123 , nil ) } ,
)
// Regression test: An already removed parent can still be in the list, which was causing Open errors.
m . Compaction . Parents = append ( m . Compaction . Parents , BlockDesc { ULID : ulid . MustParse ( filepath . Base ( compacted ) ) } )
m . Compaction . Parents = append ( m . Compaction . Parents , BlockDesc { ULID : ulid . MustParse ( filepath . Base ( compacted ) ) } )
_ , err = writeMetaFile ( log . NewLogfmtLogger ( os . Stderr ) , dir , m )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-08-11 07:53:23 -07:00
}
2022-03-24 03:44:14 -07:00
tmpCheckpointDir := path . Join ( tmpDir , "wal/checkpoint.00000001.tmp" )
err := os . MkdirAll ( tmpCheckpointDir , 0 o777 )
require . NoError ( t , err )
2023-08-08 00:32:51 -07:00
tmpChunkSnapshotDir := path . Join ( tmpDir , chunkSnapshotPrefix + "0000.00000001.tmp" )
err = os . MkdirAll ( tmpChunkSnapshotDir , 0 o777 )
require . NoError ( t , err )
2020-08-10 22:56:08 -07:00
opts := DefaultOptions ( )
opts . RetentionDuration = 0
2021-06-05 07:29:32 -07:00
db , err := Open ( tmpDir , log . NewLogfmtLogger ( os . Stderr ) , nil , opts , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-08-10 22:56:08 -07:00
loadedBlocks := db . Blocks ( )
var loaded int
for _ , l := range loadedBlocks {
if _ , ok := expectedLoadedDirs [ filepath . Join ( tmpDir , l . meta . ULID . String ( ) ) ] ; ! ok {
t . Fatal ( "unexpected block" , l . meta . ULID , "was loaded" )
}
loaded ++
}
2023-12-07 03:35:01 -08:00
require . Len ( t , expectedLoadedDirs , loaded )
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-08-10 22:56:08 -07:00
2022-04-27 02:24:36 -07:00
files , err := os . ReadDir ( tmpDir )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-08-10 22:56:08 -07:00
var ignored int
for _ , f := range files {
if _ , ok := expectedRemovedDirs [ filepath . Join ( tmpDir , f . Name ( ) ) ] ; ok {
t . Fatal ( "expected" , filepath . Join ( tmpDir , f . Name ( ) ) , "to be removed, but still exists" )
}
if _ , ok := expectedIgnoredDirs [ filepath . Join ( tmpDir , f . Name ( ) ) ] ; ok {
ignored ++
}
}
2023-12-07 03:35:01 -08:00
require . Len ( t , expectedIgnoredDirs , ignored )
2022-03-24 03:44:14 -07:00
_ , err = os . Stat ( tmpCheckpointDir )
require . True ( t , os . IsNotExist ( err ) )
2023-08-08 00:32:51 -07:00
_ , err = os . Stat ( tmpChunkSnapshotDir )
require . True ( t , os . IsNotExist ( err ) )
2020-08-10 22:56:08 -07:00
}
2020-10-19 08:27:08 -07:00
func TestOneCheckpointPerCompactCall ( t * testing . T ) {
blockRange := int64 ( 1000 )
tsdbCfg := & Options {
RetentionDuration : blockRange * 1000 ,
NoLockfile : true ,
MinBlockDuration : blockRange ,
MaxBlockDuration : blockRange ,
}
2022-01-22 01:55:01 -08:00
tmpDir := t . TempDir ( )
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2020-10-19 08:27:08 -07:00
2021-06-05 07:29:32 -07:00
db , err := Open ( tmpDir , log . NewNopLogger ( ) , prometheus . NewRegistry ( ) , tsdbCfg , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-10-19 08:27:08 -07:00
t . Cleanup ( func ( ) {
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-10-19 08:27:08 -07:00
} )
db . DisableCompactions ( )
// Case 1: Lots of uncompacted data in Head.
2022-03-09 14:17:29 -08:00
lbls := labels . FromStrings ( "foo_d" , "choco_bar" )
2020-10-19 08:27:08 -07:00
// Append samples spanning 59 block ranges.
app := db . Appender ( context . Background ( ) )
for i := int64 ( 0 ) ; i < 60 ; i ++ {
2021-02-18 04:07:00 -08:00
_ , err := app . Append ( 0 , lbls , blockRange * i , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbls , ( blockRange * i ) + blockRange / 2 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-10-19 08:27:08 -07:00
// Rotate the WAL file so that there are >3 files for the checkpoint to happen.
2022-09-20 10:05:50 -07:00
_ , err = db . head . wal . NextSegment ( )
require . NoError ( t , err )
2020-10-19 08:27:08 -07:00
}
2020-10-29 02:43:23 -07:00
require . NoError ( t , app . Commit ( ) )
2020-10-19 08:27:08 -07:00
// Check the existing WAL files.
2022-10-10 08:08:46 -07:00
first , last , err := wlog . Segments ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 0 , first )
require . Equal ( t , 60 , last )
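// Segments 0 through 60: the initial segment plus the 60 manual rotations in the loop above.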
2020-10-19 08:27:08 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( db . head . metrics . checkpointCreationTotal ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . checkpointCreationTotal ) )
2020-10-19 08:27:08 -07:00
// As the data spans 59 block ranges, 58 blocks go to disk and 1 remains in the Head.
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 58 )
2020-10-19 08:27:08 -07:00
// Though WAL was truncated only once, head should be truncated after each compaction.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 58.0 , prom_testutil . ToFloat64 ( db . head . metrics . headTruncateTotal ) )
2020-10-19 08:27:08 -07:00
// The compaction should have truncated only the first 2/3 of the WAL (while also rotating the files).
2022-10-10 08:08:46 -07:00
first , last , err = wlog . Segments ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 40 , first )
require . Equal ( t , 61 , last )
2020-10-19 08:27:08 -07:00
// The first checkpoint would be for the first 2/3rd of the WAL, hence up to segment 39.
// That should be the last checkpoint.
2022-10-10 08:08:46 -07:00
_ , cno , err := wlog . LastCheckpoint ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 39 , cno )
2020-10-19 08:27:08 -07:00
// Case 2: Old blocks on disk.
// The above blocks will act as old blocks.
// Creating a block to cover the data in the Head so that
// the Head will skip the data during replay and start fresh.
blocks := db . Blocks ( )
newBlockMint := blocks [ len ( blocks ) - 1 ] . Meta ( ) . MaxTime
newBlockMaxt := db . Head ( ) . MaxTime ( ) + 1
2020-10-29 02:43:23 -07:00
require . NoError ( t , db . Close ( ) )
2020-10-19 08:27:08 -07:00
createBlock ( t , db . dir , genSeries ( 1 , 1 , newBlockMint , newBlockMaxt ) )
2021-06-05 07:29:32 -07:00
db , err = Open ( db . dir , log . NewNopLogger ( ) , prometheus . NewRegistry ( ) , tsdbCfg , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-10-19 08:27:08 -07:00
db . DisableCompactions ( )
// 1 block more.
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 59 )
2020-10-19 08:27:08 -07:00
// No series in Head because of this new block.
2020-10-29 02:43:23 -07:00
require . Equal ( t , 0 , int ( db . head . NumSeries ( ) ) )
2020-10-19 08:27:08 -07:00
// Adding a sample far into the future.
app = db . Appender ( context . Background ( ) )
2021-02-18 04:07:00 -08:00
_ , err = app . Append ( 0 , lbls , blockRange * 120 , rand . Float64 ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
2020-10-19 08:27:08 -07:00
// The mint of the Head is the last block's maxt, which means the gap between the mint and maxt
// of the Head is too large. This will trigger many compactions.
2020-10-29 02:43:23 -07:00
require . Equal ( t , newBlockMaxt , db . head . MinTime ( ) )
2020-10-19 08:27:08 -07:00
// Another WAL file was rotated.
2022-10-10 08:08:46 -07:00
first , last , err = wlog . Segments ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 40 , first )
require . Equal ( t , 62 , last )
2020-10-19 08:27:08 -07:00
2020-10-29 02:43:23 -07:00
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( db . head . metrics . checkpointCreationTotal ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2020-10-29 02:43:23 -07:00
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . checkpointCreationTotal ) )
2020-10-19 08:27:08 -07:00
// No new blocks should be created as there was no data in between the new samples and the blocks.
2023-12-07 03:35:01 -08:00
require . Len ( t , db . Blocks ( ) , 59 )
2020-10-19 08:27:08 -07:00
// The compaction should have truncated only the first 2/3 of the WAL (while also rotating the files).
2022-10-10 08:08:46 -07:00
first , last , err = wlog . Segments ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 55 , first )
require . Equal ( t , 63 , last )
2020-10-19 08:27:08 -07:00
// The first checkpoint would be for the first 2/3rd of the WAL, hence up to segment 54.
// That should be the last checkpoint.
2022-10-10 08:08:46 -07:00
_ , cno , err = wlog . LastCheckpoint ( db . head . wal . Dir ( ) )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
require . Equal ( t , 54 , cno )
2020-10-19 08:27:08 -07:00
}
2020-10-28 03:09:03 -07:00
func TestNoPanicOnTSDBOpenError ( t * testing . T ) {
2022-01-22 01:55:01 -08:00
tmpdir := t . TempDir ( )
2020-10-28 03:09:03 -07:00
2021-11-11 08:45:25 -08:00
// Taking the lock will cause a TSDB startup error.
l , err := tsdbutil . NewDirLocker ( tmpdir , "tsdb" , log . NewNopLogger ( ) , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2021-11-11 08:45:25 -08:00
require . NoError ( t , l . Lock ( ) )
2020-10-28 03:09:03 -07:00
2021-06-05 07:29:32 -07:00
_ , err = Open ( tmpdir , nil , nil , DefaultOptions ( ) , nil )
2020-10-29 02:43:23 -07:00
require . Error ( t , err )
2020-10-28 03:09:03 -07:00
2021-11-11 08:45:25 -08:00
require . NoError ( t , l . Release ( ) )
2020-10-28 03:09:03 -07:00
}
2021-05-06 13:18:59 -07:00
2021-11-11 08:45:25 -08:00
func TestLockfile ( t * testing . T ) {
tsdbutil . TestDirLockerUsage ( t , func ( t * testing . T , data string , createLock bool ) ( * tsdbutil . DirLocker , testutil . Closer ) {
opts := DefaultOptions ( )
opts . NoLockfile = ! createLock
2021-06-16 02:33:02 -07:00
2021-11-11 08:45:25 -08:00
// Create the DB. This should create lockfile and its metrics.
db , err := Open ( data , nil , nil , opts , nil )
require . NoError ( t , err )
2021-06-16 02:33:02 -07:00
2021-11-11 08:45:25 -08:00
return db . locker , testutil . NewCallbackCloser ( func ( ) {
2021-06-16 02:33:02 -07:00
require . NoError ( t , db . Close ( ) )
} )
2021-11-11 08:45:25 -08:00
} )
2021-06-16 02:33:02 -07:00
}
2021-05-06 13:18:59 -07:00
func TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t * testing . T ) {
t . Skip ( "TODO: investigate why the process crashes in CI" )
const numRuns = 5
for i := 1 ; i <= numRuns ; i ++ {
t . Run ( strconv . Itoa ( i ) , func ( t * testing . T ) {
testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t )
} )
}
}
func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t * testing . T ) {
const (
numSeries = 1000
numStressIterations = 10000
minStressAllocationBytes = 128 * 1024
maxStressAllocationBytes = 512 * 1024
)
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
// Disable compactions so we can control them.
db . DisableCompactions ( )
// Generate the metrics we're going to append.
metrics := make ( [ ] labels . Labels , 0 , numSeries )
for i := 0 ; i < numSeries ; i ++ {
2022-03-09 14:17:29 -08:00
metrics = append ( metrics , labels . FromStrings ( labels . MetricName , fmt . Sprintf ( "test_%d" , i ) ) )
2021-05-06 13:18:59 -07:00
}
// Push 1 sample every 15s for 2x the block duration period.
ctx := context . Background ( )
interval := int64 ( 15 * time . Second / time . Millisecond )
ts := int64 ( 0 )
for ; ts < 2 * DefaultBlockDuration ; ts += interval {
app := db . Appender ( ctx )
for _ , metric := range metrics {
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Compact the TSDB head for the first time. We expect the head chunks file to have been cut.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2021-05-06 13:18:59 -07:00
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . headTruncateTotal ) )
// Push more samples for another 1x block duration period.
for ; ts < 3 * DefaultBlockDuration ; ts += interval {
app := db . Appender ( ctx )
for _ , metric := range metrics {
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// At this point we expect 2 mmap-ed head chunks.
// Get a querier and make sure it's closed only once the test is over.
2023-09-12 03:37:38 -07:00
querier , err := db . Querier ( 0 , math . MaxInt64 )
2021-05-06 13:18:59 -07:00
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , querier . Close ( ) )
} ( )
// Query back all series.
hints := & storage . SelectHints { Start : 0 , End : math . MaxInt64 , Step : interval }
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( ctx , true , hints , labels . MustNewMatcher ( labels . MatchRegexp , labels . MetricName , ".+" ) )
2021-05-06 13:18:59 -07:00
// Fetch samples iterators from all series.
var iterators [ ] chunkenc . Iterator
actualSeries := 0
for seriesSet . Next ( ) {
actualSeries ++
// Get the iterator and call Next() so that we're sure the chunk is loaded.
2022-09-20 10:16:45 -07:00
it := seriesSet . At ( ) . Iterator ( nil )
2021-05-06 13:18:59 -07:00
it . Next ( )
it . At ( )
iterators = append ( iterators , it )
}
require . NoError ( t , seriesSet . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Equal ( t , numSeries , actualSeries )
2021-05-06 13:18:59 -07:00
// Compact the TSDB head again.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2021-05-06 13:18:59 -07:00
require . Equal ( t , float64 ( 2 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . headTruncateTotal ) )
// At this point we expect 1 head chunk to have been deleted.
// Stress the memory so the GC kicks in. This is required to increase the chances
// that the chunk memory area is released to the kernel.
var buf [ ] byte
for i := 0 ; i < numStressIterations ; i ++ {
//nolint:staticcheck
buf = append ( buf , make ( [ ] byte , minStressAllocationBytes + rand . Int31n ( maxStressAllocationBytes - minStressAllocationBytes ) ) ... )
if i % 1000 == 0 {
buf = nil
}
}
// Iterate samples. Here we're summing the values just to make sure no Go compiler
// optimization kicks in if we were to discard the result of it.At().
var sum float64
var firstErr error
for _ , it := range iterators {
2021-11-28 23:54:23 -08:00
for it . Next ( ) == chunkenc . ValFloat {
2021-05-06 13:18:59 -07:00
_ , v := it . At ( )
sum += v
}
if err := it . Err ( ) ; err != nil {
firstErr = err
}
}
// After having iterated all samples, we also want to be sure that either no error occurred, or
// only the "cannot populate chunk XXX: not found" error occurred. The latter can occur
// when the iterator tries to fetch a head chunk that has been offloaded because
// of the head compaction in the meantime.
if firstErr != nil && ! strings . Contains ( firstErr . Error ( ) , "cannot populate chunk" ) {
t . Fatalf ( "unexpected error: %s" , firstErr . Error ( ) )
}
}
func TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t * testing . T ) {
t . Skip ( "TODO: investigate why the process crashes in CI" )
const numRuns = 5
for i := 1 ; i <= numRuns ; i ++ {
t . Run ( strconv . Itoa ( i ) , func ( t * testing . T ) {
testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t )
} )
}
}
func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks ( t * testing . T ) {
const (
numSeries = 1000
numStressIterations = 10000
minStressAllocationBytes = 128 * 1024
maxStressAllocationBytes = 512 * 1024
)
db := openTestDB ( t , nil , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
// Disable compactions so we can control them.
db . DisableCompactions ( )
// Generate the metrics we're going to append.
metrics := make ( [ ] labels . Labels , 0 , numSeries )
for i := 0 ; i < numSeries ; i ++ {
2022-03-09 14:17:29 -08:00
metrics = append ( metrics , labels . FromStrings ( labels . MetricName , fmt . Sprintf ( "test_%d" , i ) ) )
2021-05-06 13:18:59 -07:00
}
// Push 1 sample every 15s for 2x the block duration period.
ctx := context . Background ( )
interval := int64 ( 15 * time . Second / time . Millisecond )
ts := int64 ( 0 )
for ; ts < 2 * DefaultBlockDuration ; ts += interval {
app := db . Appender ( ctx )
for _ , metric := range metrics {
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Compact the TSDB head for the first time. We expect the head chunks file to have been cut.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2021-05-06 13:18:59 -07:00
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . headTruncateTotal ) )
// Push more samples for another 1x block duration period.
for ; ts < 3 * DefaultBlockDuration ; ts += interval {
app := db . Appender ( ctx )
for _ , metric := range metrics {
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// At this point we expect 2 mmap-ed head chunks.
// Get a querier and make sure it's closed only once the test is over.
2023-09-12 03:37:38 -07:00
querier , err := db . ChunkQuerier ( 0 , math . MaxInt64 )
2021-05-06 13:18:59 -07:00
require . NoError ( t , err )
defer func ( ) {
require . NoError ( t , querier . Close ( ) )
} ( )
// Query back all series.
hints := & storage . SelectHints { Start : 0 , End : math . MaxInt64 , Step : interval }
2023-09-12 03:37:38 -07:00
seriesSet := querier . Select ( ctx , true , hints , labels . MustNewMatcher ( labels . MatchRegexp , labels . MetricName , ".+" ) )
2021-05-06 13:18:59 -07:00
// Iterate all series and get their chunks.
2022-09-20 10:16:45 -07:00
var it chunks . Iterator
2021-05-06 13:18:59 -07:00
var chunks [ ] chunkenc . Chunk
actualSeries := 0
for seriesSet . Next ( ) {
actualSeries ++
2022-09-20 10:16:45 -07:00
it = seriesSet . At ( ) . Iterator ( it )
for it . Next ( ) {
2021-05-06 13:18:59 -07:00
chunks = append ( chunks , it . At ( ) . Chunk )
}
}
require . NoError ( t , seriesSet . Err ( ) )
2023-12-07 03:35:01 -08:00
require . Equal ( t , numSeries , actualSeries )
2021-05-06 13:18:59 -07:00
// Compact the TSDB head again.
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2021-05-06 13:18:59 -07:00
require . Equal ( t , float64 ( 2 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . headTruncateTotal ) )
// At this point we expect 1 head chunk to have been deleted.
// Stress the memory so the GC kicks in. This is required to increase the chances
// that the chunk memory area is released to the kernel.
var buf [ ] byte
for i := 0 ; i < numStressIterations ; i ++ {
//nolint:staticcheck
buf = append ( buf , make ( [ ] byte , minStressAllocationBytes + rand . Int31n ( maxStressAllocationBytes - minStressAllocationBytes ) ) ... )
if i % 1000 == 0 {
buf = nil
}
}
// Iterate chunks and read their bytes slice. Here we're computing the CRC32
// just to iterate through the bytes slice. We don't really care why we read
// this data; we just need to read it to make sure the memory address
// of the []byte is still valid.
chkCRC32 := newCRC32 ( )
for _ , chunk := range chunks {
chkCRC32 . Reset ( )
_ , err := chkCRC32 . Write ( chunk . Bytes ( ) )
require . NoError ( t , err )
}
}
2021-07-20 01:47:20 -07:00
2023-11-24 03:38:38 -08:00
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingQuerier ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 3 * DefaultBlockDuration
db := openTestDB ( t , opts , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
// Disable compactions so we can control them.
db . DisableCompactions ( )
metric := labels . FromStrings ( labels . MetricName , "test_metric" )
ctx := context . Background ( )
interval := int64 ( 15 * time . Second / time . Millisecond )
ts := int64 ( 0 )
samplesWritten := 0
// Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
oooTS := ts
ts += interval
// Push samples after the OOO sample we'll write below.
for ; ts < 10 * interval ; ts += interval {
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
}
// Push a single OOO sample.
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , oooTS , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
// Get a querier.
querierCreatedBeforeCompaction , err := db . ChunkQuerier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
// Start OOO head compaction.
compactionComplete := atomic . NewBool ( false )
go func ( ) {
defer compactionComplete . Store ( true )
require . NoError ( t , db . CompactOOOHead ( ctx ) )
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . chunksRemoved ) )
} ( )
// Give CompactOOOHead time to start work.
// If it does not wait for querierCreatedBeforeCompaction to be closed, then the query will return incorrect results or fail.
time . Sleep ( time . Second )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before reading chunks or closing querier created before compaction" )
// Get another querier. This one should only use the compacted blocks from disk and ignore the chunks that will be garbage collected.
querierCreatedAfterCompaction , err := db . ChunkQuerier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
testQuerier := func ( q storage . ChunkQuerier ) {
// Query back the series.
hints := & storage . SelectHints { Start : 0 , End : math . MaxInt64 , Step : interval }
seriesSet := q . Select ( ctx , true , hints , labels . MustNewMatcher ( labels . MatchEqual , labels . MetricName , "test_metric" ) )
// Collect the iterator for the series.
var iterators [ ] chunks . Iterator
for seriesSet . Next ( ) {
iterators = append ( iterators , seriesSet . At ( ) . Iterator ( nil ) )
}
require . NoError ( t , seriesSet . Err ( ) )
require . Len ( t , iterators , 1 )
iterator := iterators [ 0 ]
// Check that we can still successfully read all samples.
samplesRead := 0
for iterator . Next ( ) {
samplesRead += iterator . At ( ) . Chunk . NumSamples ( )
}
require . NoError ( t , iterator . Err ( ) )
require . Equal ( t , samplesWritten , samplesRead )
}
testQuerier ( querierCreatedBeforeCompaction )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before closing querier created before compaction" )
require . NoError ( t , querierCreatedBeforeCompaction . Close ( ) )
require . Eventually ( t , compactionComplete . Load , time . Second , 10 * time . Millisecond , "compaction should complete after querier created before compaction was closed, and not wait for querier created after compaction" )
// Use the querier created after compaction and confirm it returns the expected results (i.e. from the disk block created from the OOO head and the in-order head) without error.
testQuerier ( querierCreatedAfterCompaction )
require . NoError ( t , querierCreatedAfterCompaction . Close ( ) )
}
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterSelecting ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 3 * DefaultBlockDuration
db := openTestDB ( t , opts , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
// Disable compactions so we can control them.
db . DisableCompactions ( )
metric := labels . FromStrings ( labels . MetricName , "test_metric" )
ctx := context . Background ( )
interval := int64 ( 15 * time . Second / time . Millisecond )
ts := int64 ( 0 )
samplesWritten := 0
// Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
oooTS := ts
ts += interval
// Push samples after the OOO sample we'll write below.
for ; ts < 10 * interval ; ts += interval {
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
}
// Push a single OOO sample.
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , oooTS , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
// Get a querier.
querier , err := db . ChunkQuerier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
// Query back the series.
hints := & storage . SelectHints { Start : 0 , End : math . MaxInt64 , Step : interval }
seriesSet := querier . Select ( ctx , true , hints , labels . MustNewMatcher ( labels . MatchEqual , labels . MetricName , "test_metric" ) )
// Start OOO head compaction.
compactionComplete := atomic . NewBool ( false )
go func ( ) {
defer compactionComplete . Store ( true )
require . NoError ( t , db . CompactOOOHead ( ctx ) )
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . chunksRemoved ) )
} ( )
// Give CompactOOOHead time to start work.
// If it does not wait for the querier to be closed, then the query will return incorrect results or fail.
time . Sleep ( time . Second )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before reading chunks or closing querier" )
// Collect the iterator for the series.
var iterators [ ] chunks . Iterator
for seriesSet . Next ( ) {
iterators = append ( iterators , seriesSet . At ( ) . Iterator ( nil ) )
}
require . NoError ( t , seriesSet . Err ( ) )
require . Len ( t , iterators , 1 )
iterator := iterators [ 0 ]
// Check that we can still successfully read all samples.
samplesRead := 0
for iterator . Next ( ) {
samplesRead += iterator . At ( ) . Chunk . NumSamples ( )
}
require . NoError ( t , iterator . Err ( ) )
require . Equal ( t , samplesWritten , samplesRead )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before closing querier" )
require . NoError ( t , querier . Close ( ) )
require . Eventually ( t , compactionComplete . Load , time . Second , 10 * time . Millisecond , "compaction should complete after querier was closed" )
}
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingIterators ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 3 * DefaultBlockDuration
db := openTestDB ( t , opts , nil )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
// Disable compactions so we can control them.
db . DisableCompactions ( )
metric := labels . FromStrings ( labels . MetricName , "test_metric" )
ctx := context . Background ( )
interval := int64 ( 15 * time . Second / time . Millisecond )
ts := int64 ( 0 )
samplesWritten := 0
// Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
oooTS := ts
ts += interval
// Push samples after the OOO sample we'll write below.
for ; ts < 10 * interval ; ts += interval {
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , ts , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
}
// Push a single OOO sample.
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , metric , oooTS , float64 ( ts ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
samplesWritten ++
// Get a querier.
querier , err := db . ChunkQuerier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
// Query back the series.
hints := & storage . SelectHints { Start : 0 , End : math . MaxInt64 , Step : interval }
seriesSet := querier . Select ( ctx , true , hints , labels . MustNewMatcher ( labels . MatchEqual , labels . MetricName , "test_metric" ) )
// Collect the iterator for the series.
var iterators [ ] chunks . Iterator
for seriesSet . Next ( ) {
iterators = append ( iterators , seriesSet . At ( ) . Iterator ( nil ) )
}
require . NoError ( t , seriesSet . Err ( ) )
require . Len ( t , iterators , 1 )
iterator := iterators [ 0 ]
// Start OOO head compaction.
compactionComplete := atomic . NewBool ( false )
go func ( ) {
defer compactionComplete . Store ( true )
require . NoError ( t , db . CompactOOOHead ( ctx ) )
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . Head ( ) . metrics . chunksRemoved ) )
} ( )
// Give CompactOOOHead time to start work.
// If it does not wait for the querier to be closed, then the query will return incorrect results or fail.
time . Sleep ( time . Second )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before reading chunks or closing querier" )
// Check that we can still successfully read all samples.
samplesRead := 0
for iterator . Next ( ) {
samplesRead += iterator . At ( ) . Chunk . NumSamples ( )
}
require . NoError ( t , iterator . Err ( ) )
require . Equal ( t , samplesWritten , samplesRead )
require . False ( t , compactionComplete . Load ( ) , "compaction completed before closing querier" )
require . NoError ( t , querier . Close ( ) )
require . Eventually ( t , compactionComplete . Load , time . Second , 10 * time . Millisecond , "compaction should complete after querier was closed" )
}
2021-07-20 01:47:20 -07:00
func newTestDB ( t * testing . T ) * DB {
2022-01-22 01:55:01 -08:00
dir := t . TempDir ( )
2021-07-20 01:47:20 -07:00
db , err := Open ( dir , nil , nil , DefaultOptions ( ) , nil )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
return db
}
2022-02-22 07:05:15 -08:00
2022-09-20 10:05:50 -07:00
func TestOOOWALWrite ( t * testing . T ) {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 2
opts . OutOfOrderTimeWindow = 30 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
s1 , s2 := labels . FromStrings ( "l" , "v1" ) , labels . FromStrings ( "l" , "v2" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
appendSample := func ( app storage . Appender , l labels . Labels , mins int64 ) {
_ , err = app . Append ( 0 , l , minutes ( mins ) , float64 ( mins ) )
require . NoError ( t , err )
}
// Ingest sample at 1h.
app := db . Appender ( context . Background ( ) )
appendSample ( app , s1 , 60 )
appendSample ( app , s2 , 60 )
require . NoError ( t , app . Commit ( ) )
// OOO for s1.
app = db . Appender ( context . Background ( ) )
appendSample ( app , s1 , 40 )
require . NoError ( t , app . Commit ( ) )
// OOO for s2.
app = db . Appender ( context . Background ( ) )
appendSample ( app , s2 , 42 )
require . NoError ( t , app . Commit ( ) )
// OOO for both s1 and s2 in the same commit.
app = db . Appender ( context . Background ( ) )
appendSample ( app , s2 , 45 )
appendSample ( app , s1 , 35 )
appendSample ( app , s1 , 36 ) // m-maps.
appendSample ( app , s1 , 37 )
require . NoError ( t , app . Commit ( ) )
// OOO for s1 but not for s2 in the same commit.
app = db . Appender ( context . Background ( ) )
appendSample ( app , s1 , 50 ) // m-maps.
appendSample ( app , s2 , 65 )
require . NoError ( t , app . Commit ( ) )
// Single commit has 2 times m-mapping and more samples after m-map.
app = db . Appender ( context . Background ( ) )
appendSample ( app , s2 , 50 ) // m-maps.
appendSample ( app , s2 , 51 )
appendSample ( app , s2 , 52 ) // m-maps.
appendSample ( app , s2 , 53 )
require . NoError ( t , app . Commit ( ) )
// The MmapRef values in this list are not hand-calculated; they are instead taken from the test run.
// What is important here is the order of records, and that MmapRef increases for each record.
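// With OutOfOrderCapMax set to 2 above, an OOO head chunk is assumed to hold
// at most two samples; appending a third m-maps the full chunk and cuts a new
// one, which is what produces the marker records with increasing MmapRef.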
oooRecords := [ ] interface { } {
[ ] record . RefMmapMarker {
{ Ref : 1 } ,
} ,
[ ] record . RefSample {
{ Ref : 1 , T : minutes ( 40 ) , V : 40 } ,
} ,
[ ] record . RefMmapMarker {
{ Ref : 2 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 42 ) , V : 42 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 45 ) , V : 45 } ,
{ Ref : 1 , T : minutes ( 35 ) , V : 35 } ,
} ,
[ ] record . RefMmapMarker { // 3rd sample, hence m-mapped.
{ Ref : 1 , MmapRef : 4294967304 } ,
} ,
[ ] record . RefSample {
{ Ref : 1 , T : minutes ( 36 ) , V : 36 } ,
{ Ref : 1 , T : minutes ( 37 ) , V : 37 } ,
} ,
[ ] record . RefMmapMarker { // 3rd sample, hence m-mapped.
{ Ref : 1 , MmapRef : 4294967354 } ,
} ,
[ ] record . RefSample { // Does not contain the in-order sample here.
{ Ref : 1 , T : minutes ( 50 ) , V : 50 } ,
} ,
// Single commit but multiple OOO records.
[ ] record . RefMmapMarker {
{ Ref : 2 , MmapRef : 4294967403 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 50 ) , V : 50 } ,
{ Ref : 2 , T : minutes ( 51 ) , V : 51 } ,
} ,
[ ] record . RefMmapMarker {
{ Ref : 2 , MmapRef : 4294967452 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 52 ) , V : 52 } ,
{ Ref : 2 , T : minutes ( 53 ) , V : 53 } ,
} ,
}
inOrderRecords := [ ] interface { } {
[ ] record . RefSeries {
{ Ref : 1 , Labels : s1 } ,
{ Ref : 2 , Labels : s2 } ,
} ,
[ ] record . RefSample {
{ Ref : 1 , T : minutes ( 60 ) , V : 60 } ,
{ Ref : 2 , T : minutes ( 60 ) , V : 60 } ,
} ,
[ ] record . RefSample {
{ Ref : 1 , T : minutes ( 40 ) , V : 40 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 42 ) , V : 42 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 45 ) , V : 45 } ,
{ Ref : 1 , T : minutes ( 35 ) , V : 35 } ,
{ Ref : 1 , T : minutes ( 36 ) , V : 36 } ,
{ Ref : 1 , T : minutes ( 37 ) , V : 37 } ,
} ,
[ ] record . RefSample { // Contains both in-order and ooo sample.
{ Ref : 1 , T : minutes ( 50 ) , V : 50 } ,
{ Ref : 2 , T : minutes ( 65 ) , V : 65 } ,
} ,
[ ] record . RefSample {
{ Ref : 2 , T : minutes ( 50 ) , V : 50 } ,
{ Ref : 2 , T : minutes ( 51 ) , V : 51 } ,
{ Ref : 2 , T : minutes ( 52 ) , V : 52 } ,
{ Ref : 2 , T : minutes ( 53 ) , V : 53 } ,
} ,
}
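// Note the expectation encoded above: the in-order WAL sees every sample,
// including the out-of-order ones, while the m-map marker records are
// expected only in the WBL.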
getRecords := func ( walDir string ) [ ] interface { } {
2022-10-10 08:08:46 -07:00
sr , err := wlog . NewSegmentsReader ( walDir )
2022-09-20 10:05:50 -07:00
require . NoError ( t , err )
2022-10-10 08:08:46 -07:00
r := wlog . NewReader ( sr )
2022-09-20 10:05:50 -07:00
defer func ( ) {
require . NoError ( t , sr . Close ( ) )
} ( )
var (
records [ ] interface { }
dec record . Decoder
)
for r . Next ( ) {
rec := r . Record ( )
switch typ := dec . Type ( rec ) ; typ {
case record . Series :
series , err := dec . Series ( rec , nil )
require . NoError ( t , err )
records = append ( records , series )
case record . Samples :
samples , err := dec . Samples ( rec , nil )
require . NoError ( t , err )
records = append ( records , samples )
case record . MmapMarkers :
markers , err := dec . MmapMarkers ( rec , nil )
require . NoError ( t , err )
records = append ( records , markers )
default :
t . Fatalf ( "got a WAL record that is not series, samples, or mmap markers: %v" , typ )
}
}
return records
}
// The normal WAL.
actRecs := getRecords ( path . Join ( dir , "wal" ) )
require . Equal ( t , inOrderRecords , actRecs )
2023-10-13 05:21:35 -07:00
// The WBL.
2022-10-10 08:08:46 -07:00
actRecs = getRecords ( path . Join ( dir , wlog . WblDirName ) )
2022-09-20 10:05:50 -07:00
require . Equal ( t , oooRecords , actRecs )
}
2022-02-22 07:05:15 -08:00
// Tests https://github.com/prometheus/prometheus/issues/10291#issuecomment-1044373110.
func TestDBPanicOnMmappingHeadChunk ( t * testing . T ) {
dir := t . TempDir ( )
2023-09-13 08:45:06 -07:00
ctx := context . Background ( )
2022-02-22 07:05:15 -08:00
db , err := Open ( dir , nil , nil , DefaultOptions ( ) , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
// Choosing a scrape interval of 45s to have chunks spanning more than 1h.
itvl := int64 ( 45 * time . Second / time . Millisecond )
lastTs := int64 ( 0 )
addSamples := func ( numSamples int ) {
app := db . Appender ( context . Background ( ) )
var ref storage . SeriesRef
lbls := labels . FromStrings ( "__name__" , "testing" , "foo" , "bar" )
for i := 0 ; i < numSamples ; i ++ {
ref , err = app . Append ( ref , lbls , lastTs , float64 ( lastTs ) )
require . NoError ( t , err )
lastTs += itvl
if i % 10 == 0 {
require . NoError ( t , app . Commit ( ) )
app = db . Appender ( context . Background ( ) )
}
}
require . NoError ( t , app . Commit ( ) )
}
// Ingest samples up to 2h50m to make the head "about to compact".
numSamples := int ( 170 * time . Minute / time . Millisecond ) / int ( itvl )
addSamples ( numSamples )
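// At 2h50m the head spans less than 1.5x the 2h block range (the assumed
// threshold for head compactability), so no block is expected yet.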
2023-12-07 03:35:01 -08:00
require . Empty ( t , db . Blocks ( ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2023-12-07 03:35:01 -08:00
require . Empty ( t , db . Blocks ( ) )
2022-02-22 07:05:15 -08:00
// Restarting.
require . NoError ( t , db . Close ( ) )
db , err = Open ( dir , nil , nil , DefaultOptions ( ) , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
// Ingest samples up to 20m more to make the head compact.
numSamples = int ( 20 * time . Minute / time . Millisecond ) / int ( itvl )
addSamples ( numSamples )
2023-12-07 03:35:01 -08:00
require . Empty ( t , db . Blocks ( ) )
2023-09-13 08:45:06 -07:00
require . NoError ( t , db . Compact ( ctx ) )
2022-02-22 07:05:15 -08:00
require . Len ( t , db . Blocks ( ) , 1 )
// More samples to m-map and panic.
numSamples = int ( 120 * time . Minute / time . Millisecond ) / int ( itvl )
addSamples ( numSamples )
require . NoError ( t , db . Close ( ) )
}
2022-07-19 01:58:52 -07:00
func TestMetadataInWAL ( t * testing . T ) {
updateMetadata := func ( t * testing . T , app storage . Appender , s labels . Labels , m metadata . Metadata ) {
_ , err := app . UpdateMetadata ( 0 , s , m )
require . NoError ( t , err )
}
db := newTestDB ( t )
ctx := context . Background ( )
// Add some series so we can append metadata to them.
app := db . Appender ( ctx )
s1 := labels . FromStrings ( "a" , "b" )
s2 := labels . FromStrings ( "c" , "d" )
s3 := labels . FromStrings ( "e" , "f" )
s4 := labels . FromStrings ( "g" , "h" )
for _ , s := range [ ] labels . Labels { s1 , s2 , s3 , s4 } {
_ , err := app . Append ( 0 , s , 0 , 0 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have closed it.
m1 := metadata . Metadata { Type : "gauge" , Unit : "unit_1" , Help : "help_1" }
m2 := metadata . Metadata { Type : "gauge" , Unit : "unit_2" , Help : "help_2" }
m3 := metadata . Metadata { Type : "gauge" , Unit : "unit_3" , Help : "help_3" }
app = db . Appender ( ctx )
updateMetadata ( t , app , s1 , m1 )
updateMetadata ( t , app , s2 , m2 )
updateMetadata ( t , app , s3 , m3 )
require . NoError ( t , app . Commit ( ) )
// Add a replicated metadata entry to the first series,
// a completely new metadata entry for the fourth series,
// and a changed metadata entry to the second series.
m4 := metadata . Metadata { Type : "counter" , Unit : "unit_4" , Help : "help_4" }
m5 := metadata . Metadata { Type : "counter" , Unit : "unit_5" , Help : "help_5" }
app = db . Appender ( ctx )
updateMetadata ( t , app , s1 , m1 )
updateMetadata ( t , app , s4 , m4 )
updateMetadata ( t , app , s2 , m5 )
require . NoError ( t , app . Commit ( ) )
// Read the WAL to see if the disk storage format is correct.
recs := readTestWAL ( t , path . Join ( db . Dir ( ) , "wal" ) )
var gotMetadataBlocks [ ] [ ] record . RefMetadata
for _ , rec := range recs {
if mr , ok := rec . ( [ ] record . RefMetadata ) ; ok {
gotMetadataBlocks = append ( gotMetadataBlocks , mr )
}
}
expectedMetadata := [ ] record . RefMetadata {
{ Ref : 1 , Type : record . GetMetricType ( m1 . Type ) , Unit : m1 . Unit , Help : m1 . Help } ,
{ Ref : 2 , Type : record . GetMetricType ( m2 . Type ) , Unit : m2 . Unit , Help : m2 . Help } ,
{ Ref : 3 , Type : record . GetMetricType ( m3 . Type ) , Unit : m3 . Unit , Help : m3 . Help } ,
{ Ref : 4 , Type : record . GetMetricType ( m4 . Type ) , Unit : m4 . Unit , Help : m4 . Help } ,
{ Ref : 2 , Type : record . GetMetricType ( m5 . Type ) , Unit : m5 . Unit , Help : m5 . Help } ,
}
require . Len ( t , gotMetadataBlocks , 2 )
require . Equal ( t , expectedMetadata [ : 3 ] , gotMetadataBlocks [ 0 ] )
require . Equal ( t , expectedMetadata [ 3 : ] , gotMetadataBlocks [ 1 ] )
}
func TestMetadataCheckpointingOnlyKeepsLatestEntry ( t * testing . T ) {
updateMetadata := func ( t * testing . T , app storage . Appender , s labels . Labels , m metadata . Metadata ) {
_ , err := app . UpdateMetadata ( 0 , s , m )
require . NoError ( t , err )
}
ctx := context . Background ( )
numSamples := 10000
2023-07-11 05:57:57 -07:00
hb , w := newTestHead ( t , int64 ( numSamples ) * 10 , wlog . CompressionNone , false )
2022-07-19 01:58:52 -07:00
// Add some series so we can append metadata to them.
app := hb . Appender ( ctx )
s1 := labels . FromStrings ( "a" , "b" )
s2 := labels . FromStrings ( "c" , "d" )
s3 := labels . FromStrings ( "e" , "f" )
s4 := labels . FromStrings ( "g" , "h" )
for _ , s := range [ ] labels . Labels { s1 , s2 , s3 , s4 } {
_ , err := app . Append ( 0 , s , 0 , 0 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have closed it.
m1 := metadata . Metadata { Type : "gauge" , Unit : "unit_1" , Help : "help_1" }
m2 := metadata . Metadata { Type : "gauge" , Unit : "unit_2" , Help : "help_2" }
m3 := metadata . Metadata { Type : "gauge" , Unit : "unit_3" , Help : "help_3" }
m4 := metadata . Metadata { Type : "gauge" , Unit : "unit_4" , Help : "help_4" }
app = hb . Appender ( ctx )
updateMetadata ( t , app , s1 , m1 )
updateMetadata ( t , app , s2 , m2 )
updateMetadata ( t , app , s3 , m3 )
updateMetadata ( t , app , s4 , m4 )
require . NoError ( t , app . Commit ( ) )
// Update metadata for first series.
m5 := metadata . Metadata { Type : "counter" , Unit : "unit_5" , Help : "help_5" }
app = hb . Appender ( ctx )
updateMetadata ( t , app , s1 , m5 )
require . NoError ( t , app . Commit ( ) )
// Switch back-and-forth metadata for second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata . Metadata { Type : "counter" , Unit : "unit_6" , Help : "help_6" }
app = hb . Appender ( ctx )
updateMetadata ( t , app , s2 , m6 )
require . NoError ( t , app . Commit ( ) )
app = hb . Appender ( ctx )
updateMetadata ( t , app , s2 , m2 )
require . NoError ( t , app . Commit ( ) )
app = hb . Appender ( ctx )
updateMetadata ( t , app , s2 , m6 )
require . NoError ( t , app . Commit ( ) )
app = hb . Appender ( ctx )
updateMetadata ( t , app , s2 , m2 )
require . NoError ( t , app . Commit ( ) )
app = hb . Appender ( ctx )
updateMetadata ( t , app , s2 , m6 )
require . NoError ( t , app . Commit ( ) )
// Let's create a checkpoint.
2022-10-10 08:08:46 -07:00
first , last , err := wlog . Segments ( w . Dir ( ) )
2022-07-19 01:58:52 -07:00
require . NoError ( t , err )
keep := func ( id chunks . HeadSeriesRef ) bool {
return id != 3
}
2022-10-10 08:08:46 -07:00
_ , err = wlog . Checkpoint ( log . NewNopLogger ( ) , w , first , last - 1 , keep , 0 )
2022-07-19 01:58:52 -07:00
require . NoError ( t , err )
// Confirm there's been a checkpoint.
2022-10-10 08:08:46 -07:00
cdir , _ , err := wlog . LastCheckpoint ( w . Dir ( ) )
2022-07-19 01:58:52 -07:00
require . NoError ( t , err )
// Read in checkpoint and WAL.
recs := readTestWAL ( t , cdir )
var gotMetadataBlocks [ ] [ ] record . RefMetadata
for _ , rec := range recs {
if mr , ok := rec . ( [ ] record . RefMetadata ) ; ok {
gotMetadataBlocks = append ( gotMetadataBlocks , mr )
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := [ ] record . RefMetadata {
{ Ref : 1 , Type : record . GetMetricType ( m5 . Type ) , Unit : m5 . Unit , Help : m5 . Help } ,
{ Ref : 2 , Type : record . GetMetricType ( m6 . Type ) , Unit : m6 . Unit , Help : m6 . Help } ,
{ Ref : 4 , Type : record . GetMetricType ( m4 . Type ) , Unit : m4 . Unit , Help : m4 . Help } ,
}
require . Len ( t , gotMetadataBlocks , 1 )
require . Len ( t , gotMetadataBlocks [ 0 ] , 3 )
gotMetadataBlock := gotMetadataBlocks [ 0 ]
sort . Slice ( gotMetadataBlock , func ( i , j int ) bool { return gotMetadataBlock [ i ] . Ref < gotMetadataBlock [ j ] . Ref } )
require . Equal ( t , wantMetadata , gotMetadataBlock )
require . NoError ( t , hb . Close ( ) )
}
func TestMetadataAssertInMemoryData ( t * testing . T ) {
updateMetadata := func ( t * testing . T , app storage . Appender , s labels . Labels , m metadata . Metadata ) {
_ , err := app . UpdateMetadata ( 0 , s , m )
require . NoError ( t , err )
}
db := openTestDB ( t , nil , nil )
ctx := context . Background ( )
// Add some series so we can append metadata to them.
app := db . Appender ( ctx )
s1 := labels . FromStrings ( "a" , "b" )
s2 := labels . FromStrings ( "c" , "d" )
s3 := labels . FromStrings ( "e" , "f" )
s4 := labels . FromStrings ( "g" , "h" )
for _ , s := range [ ] labels . Labels { s1 , s2 , s3 , s4 } {
_ , err := app . Append ( 0 , s , 0 , 0 )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
// Add a first round of metadata to the first three series.
// The in-memory data held in the db Head should hold the metadata.
m1 := metadata . Metadata { Type : "gauge" , Unit : "unit_1" , Help : "help_1" }
m2 := metadata . Metadata { Type : "gauge" , Unit : "unit_2" , Help : "help_2" }
m3 := metadata . Metadata { Type : "gauge" , Unit : "unit_3" , Help : "help_3" }
app = db . Appender ( ctx )
updateMetadata ( t , app , s1 , m1 )
updateMetadata ( t , app , s2 , m2 )
updateMetadata ( t , app , s3 , m3 )
require . NoError ( t , app . Commit ( ) )
series1 := db . head . series . getByHash ( s1 . Hash ( ) , s1 )
series2 := db . head . series . getByHash ( s2 . Hash ( ) , s2 )
series3 := db . head . series . getByHash ( s3 . Hash ( ) , s3 )
series4 := db . head . series . getByHash ( s4 . Hash ( ) , s4 )
require . Equal ( t , * series1 . meta , m1 )
require . Equal ( t , * series2 . meta , m2 )
require . Equal ( t , * series3 . meta , m3 )
require . Nil ( t , series4 . meta )
// Add a replicated metadata entry to the first series,
// a changed metadata entry to the second series,
// and a completely new metadata entry for the fourth series.
// The in-memory data held in the db Head should be correctly updated.
m4 := metadata . Metadata { Type : "counter" , Unit : "unit_4" , Help : "help_4" }
m5 := metadata . Metadata { Type : "counter" , Unit : "unit_5" , Help : "help_5" }
app = db . Appender ( ctx )
updateMetadata ( t , app , s1 , m1 )
updateMetadata ( t , app , s4 , m4 )
updateMetadata ( t , app , s2 , m5 )
require . NoError ( t , app . Commit ( ) )
series1 = db . head . series . getByHash ( s1 . Hash ( ) , s1 )
series2 = db . head . series . getByHash ( s2 . Hash ( ) , s2 )
series3 = db . head . series . getByHash ( s3 . Hash ( ) , s3 )
series4 = db . head . series . getByHash ( s4 . Hash ( ) , s4 )
require . Equal ( t , * series1 . meta , m1 )
require . Equal ( t , * series2 . meta , m5 )
require . Equal ( t , * series3 . meta , m3 )
require . Equal ( t , * series4 . meta , m4 )
require . NoError ( t , db . Close ( ) )
// Reopen the DB, replaying the WAL. The Head must have been replayed
// correctly in memory.
reopenDB , err := Open ( db . Dir ( ) , nil , nil , nil , nil )
require . NoError ( t , err )
t . Cleanup ( func ( ) {
require . NoError ( t , reopenDB . Close ( ) )
} )
_ , err = reopenDB . head . wal . Size ( )
require . NoError ( t , err )
require . Equal ( t , * reopenDB . head . series . getByHash ( s1 . Hash ( ) , s1 ) . meta , m1 )
require . Equal ( t , * reopenDB . head . series . getByHash ( s2 . Hash ( ) , s2 ) . meta , m5 )
require . Equal ( t , * reopenDB . head . series . getByHash ( s3 . Hash ( ) , s3 ) . meta , m3 )
require . Equal ( t , * reopenDB . head . series . getByHash ( s4 . Hash ( ) , s4 ) . meta , m4 )
}
// TODO(codesome): test more samples incoming once compaction has started, to verify
// that new samples arriving after the start are not included in this compaction.
func TestOOOCompaction ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
series2 := labels . FromStrings ( "foo" , "bar2" )
addSample := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , series2 , ts , float64 ( 2 * ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSample ( 250 , 350 )
// Verify that the in-memory ooo chunk is empty.
checkEmptyOOOChunk := func ( lbls labels . Labels ) {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Nil ( t , ms . ooo )
}
checkEmptyOOOChunk ( series1 )
checkEmptyOOOChunk ( series2 )
// Add ooo samples that create multiple chunks.
// 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
addSample ( 90 , 310 )
// Add the same samples again to create overlapping chunks.
// Since the active chunk won't start at 90 again, all the new
// chunks will have different time ranges than the previous chunks.
addSample ( 90 , 310 )
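// Rough arithmetic for the assertions below: minutes 90 to 310 inclusive is
// 221 samples per pass; with OutOfOrderCapMax=30 each pass m-maps 7 full
// chunks (210 samples) and keeps the rest in the open ooo head chunk, so the
// two passes should yield the 14 m-mapped chunks checked later.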
verifyDBSamples := func ( ) {
var series1Samples , series2Samples [ ] chunks . Sample
for _ , r := range [ ] [ 2 ] int64 { { 90 , 119 } , { 120 , 239 } , { 240 , 350 } } {
fromMins , toMins := r [ 0 ] , r [ 1 ]
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
series2Samples = append ( series2Samples , sample { ts , float64 ( 2 * ts ) , nil , nil } )
}
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
series2 . String ( ) : series2Samples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
verifyDBSamples ( ) // Before any compaction.
// Verify that the in-memory ooo chunk is not empty.
checkNonEmptyOOOChunk := func ( lbls labels . Labels ) {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Greater ( t , ms . ooo . oooHeadChunk . chunk . NumSamples ( ) , 0 )
require . Len ( t , ms . ooo . oooMmappedChunks , 14 ) // 7 original, 7 duplicate.
}
checkNonEmptyOOOChunk ( series1 )
checkNonEmptyOOOChunk ( series2 )
// No blocks before compaction.
require . Empty ( t , db . Blocks ( ) )
// There is a 0th WBL file.
require . NoError ( t , db . head . wbl . Sync ( ) ) // Syncing to make sure the WBL is flushed on Windows.
files , err := os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 1 )
require . Equal ( t , "00000000" , files [ 0 ] . Name ( ) )
f , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f . Size ( ) , int64 ( 100 ) )
// OOO compaction happens here.
require . NoError ( t , db . CompactOOOHead ( ctx ) )
// 3 blocks exist now. [0, 120), [120, 240), [240, 360)
require . Len ( t , db . Blocks ( ) , 3 )
verifyDBSamples ( ) // Blocks created out of OOO head now.
// The 0th WBL file will be deleted and the 1st will be the only one present.
files , err = os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 1 )
require . Equal ( t , "00000001" , files [ 0 ] . Name ( ) )
f , err = files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Equal ( t , int64 ( 0 ) , f . Size ( ) )
// OOO stuff should not be present in the Head now.
checkEmptyOOOChunk ( series1 )
checkEmptyOOOChunk ( series2 )
verifySamples := func ( block * Block , fromMins , toMins int64 ) {
series1Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
series2Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
series2Samples = append ( series2Samples , sample { ts , float64 ( 2 * ts ) , nil , nil } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
series2 . String ( ) : series2Samples ,
}
q , err := NewBlockQuerier ( block , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
// Checking for expected data in the blocks.
verifySamples ( db . Blocks ( ) [ 0 ] , 90 , 119 )
verifySamples ( db . Blocks ( ) [ 1 ] , 120 , 239 )
verifySamples ( db . Blocks ( ) [ 2 ] , 240 , 310 )
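// Note that the last block's samples stop at 310, the largest ooo timestamp
// ingested, even though the block's range extends to 360.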
// There should be a single m-map file.
mmapDir := mmappedChunksDir ( db . head . opts . ChunkDirRoot )
files , err = os . ReadDir ( mmapDir )
require . NoError ( t , err )
require . Len ( t , files , 1 )
// Compact the in-order head and expect another block.
// Since this is a forced compaction, this block is not aligned with 2h.
err = db . CompactHead ( NewRangeHead ( db . head , 250 * time . Minute . Milliseconds ( ) , 350 * time . Minute . Milliseconds ( ) ) )
require . NoError ( t , err )
require . Len ( t , db . Blocks ( ) , 4 ) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples ( db . Blocks ( ) [ 3 ] , 250 , 350 )
verifyDBSamples ( ) // Blocks created out of normal and OOO head now. But not merged.
// The compaction also clears out the old m-map files. Including
// the file that has ooo chunks.
files , err = os . ReadDir ( mmapDir )
require . NoError ( t , err )
require . Len ( t , files , 1 )
require . Equal ( t , "000001" , files [ 0 ] . Name ( ) )
// This will merge the overlapping blocks.
require . NoError ( t , db . Compact ( ctx ) )
require . Len ( t , db . Blocks ( ) , 3 ) // [0, 120), [120, 240), [240, 360)
verifySamples ( db . Blocks ( ) [ 0 ] , 90 , 119 )
verifySamples ( db . Blocks ( ) [ 1 ] , 120 , 239 )
verifySamples ( db . Blocks ( ) [ 2 ] , 240 , 350 ) // Merged block.
verifyDBSamples ( ) // Final state. Blocks from normal and OOO head are merged.
}
// TestOOOCompactionWithNormalCompaction tests if OOO compaction is performed
// when the normal head's compaction is done.
func TestOOOCompactionWithNormalCompaction ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
series2 := labels . FromStrings ( "foo" , "bar2" )
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , series2 , ts , float64 ( 2 * ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSamples ( 250 , 350 )
// Add ooo samples that will result in a single block.
addSamples ( 90 , 110 )
// Checking that ooo chunk is not empty.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Greater ( t , ms . ooo . oooHeadChunk . chunk . NumSamples ( ) , 0 )
}
// If the normal Head is not compacted, the OOO head compaction does not take place.
require . NoError ( t , db . Compact ( ctx ) )
require . Empty ( t , db . Blocks ( ) )
// Add more in-order samples at future timestamps that trigger the compaction.
addSamples ( 400 , 450 )
// No blocks before compaction.
require . Empty ( t , db . Blocks ( ) )
// Compacts normal and OOO head.
require . NoError ( t , db . Compact ( ctx ) )
// 2 blocks exist now. [0, 120), [250, 360)
require . Len ( t , db . Blocks ( ) , 2 )
require . Equal ( t , int64 ( 0 ) , db . Blocks ( ) [ 0 ] . MinTime ( ) )
require . Equal ( t , 120 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 0 ] . MaxTime ( ) )
require . Equal ( t , 250 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 1 ] . MinTime ( ) )
require . Equal ( t , 360 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 1 ] . MaxTime ( ) )
// Checking that ooo chunk is empty.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Nil ( t , ms . ooo )
}
verifySamples := func ( block * Block , fromMins , toMins int64 ) {
series1Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
series2Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
series2Samples = append ( series2Samples , sample { ts , float64 ( 2 * ts ) , nil , nil } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
series2 . String ( ) : series2Samples ,
}
q , err := NewBlockQuerier ( block , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
// Checking for expected data in the blocks.
verifySamples ( db . Blocks ( ) [ 0 ] , 90 , 110 )
verifySamples ( db . Blocks ( ) [ 1 ] , 250 , 350 )
}
// TestOOOCompactionWithDisabledWriteLog tests the scenario where the TSDB is
// configured to have no WAL and no WBL but is still able to compact both the in-order
// and out-of-order head.
func TestOOOCompactionWithDisabledWriteLog ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
opts . WALSegmentSize = - 1 // disabled WAL and WBL
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
series2 := labels . FromStrings ( "foo" , "bar2" )
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , series2 , ts , float64 ( 2 * ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSamples ( 250 , 350 )
// Add ooo samples that will result in a single block.
addSamples ( 90 , 110 )
// Checking that ooo chunk is not empty.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Greater ( t , ms . ooo . oooHeadChunk . chunk . NumSamples ( ) , 0 )
}
// If the normal Head is not compacted, the OOO head compaction does not take place.
require . NoError ( t , db . Compact ( ctx ) )
require . Empty ( t , db . Blocks ( ) )
// Add more in-order samples at future timestamps that trigger the compaction.
addSamples ( 400 , 450 )
// No blocks before compaction.
require . Empty ( t , db . Blocks ( ) )
// Compacts normal and OOO head.
require . NoError ( t , db . Compact ( ctx ) )
// 2 blocks exist now. [0, 120), [250, 360)
require . Len ( t , db . Blocks ( ) , 2 )
require . Equal ( t , int64 ( 0 ) , db . Blocks ( ) [ 0 ] . MinTime ( ) )
require . Equal ( t , 120 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 0 ] . MaxTime ( ) )
require . Equal ( t , 250 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 1 ] . MinTime ( ) )
require . Equal ( t , 360 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 1 ] . MaxTime ( ) )
// Checking that ooo chunk is empty.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Nil ( t , ms . ooo )
}
verifySamples := func ( block * Block , fromMins , toMins int64 ) {
series1Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
series2Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
series2Samples = append ( series2Samples , sample { ts , float64 ( 2 * ts ) , nil , nil } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
series2 . String ( ) : series2Samples ,
}
q , err := NewBlockQuerier ( block , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
// Checking for expected data in the blocks.
verifySamples ( db . Blocks ( ) [ 0 ] , 90 , 110 )
verifySamples ( db . Blocks ( ) [ 1 ] , 250 , 350 )
}
// TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL tests the scenario where the WBL goes
// missing after a restart while snapshot was enabled, but the query still returns the right
// data from the mmap chunks.
func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 10
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
opts . EnableMemorySnapshotOnShutdown = true
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
series2 := labels . FromStrings ( "foo" , "bar2" )
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
_ , err = app . Append ( 0 , series2 , ts , float64 ( 2 * ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSamples ( 250 , 350 )
// Add ooo samples that will result in a single block.
addSamples ( 90 , 110 ) // The sample 110 will not be in m-map chunks.
// Checking that there are some ooo m-map chunks.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Len ( t , ms . ooo . oooMmappedChunks , 2 )
require . NotNil ( t , ms . ooo . oooHeadChunk )
}
// Restart DB.
require . NoError ( t , db . Close ( ) )
// For some reason the WBL goes missing.
require . NoError ( t , os . RemoveAll ( path . Join ( dir , "wbl" ) ) )
db , err = Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
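// With OutOfOrderCapMax=10, samples 90-110 should have produced two full
// m-mapped chunks (90-99 and 100-109), while the head chunk holding only
// sample 110 lived solely in the WBL that was just deleted.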
// Check ooo m-map chunks again.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Len ( t , ms . ooo . oooMmappedChunks , 2 )
require . Equal ( t , 109 * time . Minute . Milliseconds ( ) , ms . ooo . oooMmappedChunks [ 1 ] . maxTime )
require . Nil ( t , ms . ooo . oooHeadChunk ) // Because of missing wbl.
}
verifySamples := func ( fromMins , toMins int64 ) {
series1Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
series2Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
series2Samples = append ( series2Samples , sample { ts , float64 ( 2 * ts ) , nil , nil } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
series2 . String ( ) : series2Samples ,
}
q , err := db . Querier ( fromMins * time . Minute . Milliseconds ( ) , toMins * time . Minute . Milliseconds ( ) )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
// Checking for expected ooo data from mmap chunks.
verifySamples ( 90 , 109 )
// Compaction should also work fine.
require . Empty ( t , db . Blocks ( ) )
require . NoError ( t , db . CompactOOOHead ( ctx ) )
require . Len ( t , db . Blocks ( ) , 1 ) // One block from OOO data.
require . Equal ( t , int64 ( 0 ) , db . Blocks ( ) [ 0 ] . MinTime ( ) )
require . Equal ( t , 120 * time . Minute . Milliseconds ( ) , db . Blocks ( ) [ 0 ] . MaxTime ( ) )
// Checking that ooo chunk is empty in Head.
for _ , lbls := range [ ] labels . Labels { series1 , series2 } {
ms , created , err := db . head . getOrCreate ( lbls . Hash ( ) , lbls )
require . NoError ( t , err )
require . False ( t , created )
require . Nil ( t , ms . ooo )
}
verifySamples ( 90 , 109 )
}
func Test_Querier_OOOQuery ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 24 * time . Hour . Milliseconds ( )
series1 := labels . FromStrings ( "foo" , "bar1" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
addSample := func ( db * DB , fromMins , toMins , queryMinT , queryMaxT int64 , expSamples [ ] chunks . Sample ) ( [ ] chunks . Sample , int ) {
app := db . Appender ( context . Background ( ) )
totalAppended := 0
for min := fromMins ; min <= toMins ; min += time . Minute . Milliseconds ( ) {
_ , err := app . Append ( 0 , series1 , min , float64 ( min ) )
if min >= queryMinT && min <= queryMaxT {
expSamples = append ( expSamples , sample { t : min , f : float64 ( min ) } )
}
require . NoError ( t , err )
totalAppended ++
}
require . NoError ( t , app . Commit ( ) )
return expSamples , totalAppended
}
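// addSample returns the accumulated expected samples falling inside
// [queryMinT, queryMaxT], plus the number of samples it appended.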
tests := [ ] struct {
name string
queryMinT int64
queryMaxT int64
inOrderMinT int64
inOrderMaxT int64
oooMinT int64
oooMaxT int64
} {
{
name : "query interval covering ooomint and inordermaxt returns all ingested samples" ,
queryMinT : minutes ( 0 ) ,
queryMaxT : minutes ( 200 ) ,
inOrderMinT : minutes ( 100 ) ,
inOrderMaxT : minutes ( 200 ) ,
oooMinT : minutes ( 0 ) ,
oooMaxT : minutes ( 99 ) ,
} ,
{
name : "partial query interval returns only samples within interval" ,
queryMinT : minutes ( 20 ) ,
queryMaxT : minutes ( 180 ) ,
inOrderMinT : minutes ( 100 ) ,
inOrderMaxT : minutes ( 200 ) ,
oooMinT : minutes ( 0 ) ,
oooMaxT : minutes ( 99 ) ,
} ,
}
for _ , tc := range tests {
t . Run ( fmt . Sprintf ( "name=%s" , tc . name ) , func ( t * testing . T ) {
db := openTestDB ( t , opts , nil )
db . DisableCompactions ( )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
var expSamples [ ] chunks . Sample
// Add in-order samples.
expSamples , _ = addSample ( db , tc . inOrderMinT , tc . inOrderMaxT , tc . queryMinT , tc . queryMaxT , expSamples )
// Add out-of-order samples.
expSamples , oooSamples := addSample ( db , tc . oooMinT , tc . oooMaxT , tc . queryMinT , tc . queryMaxT , expSamples )
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
querier , err := db . Querier ( tc . queryMinT , tc . queryMaxT )
require . NoError ( t , err )
defer querier . Close ( )
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar1" ) )
require . NotNil ( t , seriesSet [ series1 . String ( ) ] )
require . Len ( t , seriesSet , 1 )
require . Equal ( t , expSamples , seriesSet [ series1 . String ( ) ] )
require . GreaterOrEqual ( t , float64 ( oooSamples ) , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamplesAppended ) , "number of ooo appended samples mismatch" )
} )
}
}
func Test_ChunkQuerier_OOOQuery ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 24 * time . Hour . Milliseconds ( )
series1 := labels . FromStrings ( "foo" , "bar1" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
addSample := func ( db * DB , fromMins , toMins , queryMinT , queryMaxT int64 , expSamples [ ] chunks . Sample ) ( [ ] chunks . Sample , int ) {
app := db . Appender ( context . Background ( ) )
totalAppended := 0
for min := fromMins ; min <= toMins ; min += time . Minute . Milliseconds ( ) {
_ , err := app . Append ( 0 , series1 , min , float64 ( min ) )
if min >= queryMinT && min <= queryMaxT {
expSamples = append ( expSamples , sample { t : min , f : float64 ( min ) } )
}
require . NoError ( t , err )
totalAppended ++
}
require . NoError ( t , app . Commit ( ) )
return expSamples , totalAppended
}
tests := [ ] struct {
name string
queryMinT int64
queryMaxT int64
inOrderMinT int64
inOrderMaxT int64
oooMinT int64
oooMaxT int64
} {
{
name : "query interval covering ooomint and inordermaxt returns all ingested samples" ,
queryMinT : minutes ( 0 ) ,
queryMaxT : minutes ( 200 ) ,
inOrderMinT : minutes ( 100 ) ,
inOrderMaxT : minutes ( 200 ) ,
oooMinT : minutes ( 0 ) ,
oooMaxT : minutes ( 99 ) ,
} ,
{
name : "partial query interval returns only samples within interval" ,
queryMinT : minutes ( 20 ) ,
queryMaxT : minutes ( 180 ) ,
inOrderMinT : minutes ( 100 ) ,
inOrderMaxT : minutes ( 200 ) ,
oooMinT : minutes ( 0 ) ,
oooMaxT : minutes ( 99 ) ,
} ,
}
for _ , tc := range tests {
t . Run ( fmt . Sprintf ( "name=%s" , tc . name ) , func ( t * testing . T ) {
db := openTestDB ( t , opts , nil )
db . DisableCompactions ( )
defer func ( ) {
require . NoError ( t , db . Close ( ) )
} ( )
var expSamples [ ] chunks . Sample
// Add in-order samples.
expSamples , _ = addSample ( db , tc . inOrderMinT , tc . inOrderMaxT , tc . queryMinT , tc . queryMaxT , expSamples )
// Add out-of-order samples.
expSamples , oooSamples := addSample ( db , tc . oooMinT , tc . oooMaxT , tc . queryMinT , tc . queryMaxT , expSamples )
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
querier , err := db . ChunkQuerier ( tc . queryMinT , tc . queryMaxT )
require . NoError ( t , err )
defer querier . Close ( )
chks := queryChunks ( t , querier , labels . MustNewMatcher ( labels . MatchEqual , "foo" , "bar1" ) )
require . NotNil ( t , chks [ series1 . String ( ) ] )
require . Len ( t , chks , 1 )
require . Equal ( t , float64 ( oooSamples ) , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamplesAppended ) , "number of ooo appended samples mismatch" )
var gotSamples [ ] chunks . Sample
for _ , chunk := range chks [ series1 . String ( ) ] {
it := chunk . Chunk . Iterator ( nil )
for it . Next ( ) == chunkenc . ValFloat {
ts , v := it . At ( )
gotSamples = append ( gotSamples , sample { t : ts , f : v } )
}
}
require . Equal ( t , expSamples , gotSamples )
} )
}
}
func TestOOOAppendAndQuery ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 4 * time . Hour . Milliseconds ( )
db := openTestDB ( t , opts , nil )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
s1 := labels . FromStrings ( "foo" , "bar1" )
s2 := labels . FromStrings ( "foo" , "bar2" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
appendedSamples := make ( map [ string ] [ ] chunks . Sample )
totalSamples := 0
addSample := func ( lbls labels . Labels , fromMins , toMins int64 , faceError bool ) {
app := db . Appender ( context . Background ( ) )
key := lbls . String ( )
from , to := minutes ( fromMins ) , minutes ( toMins )
for min := from ; min <= to ; min += time . Minute . Milliseconds ( ) {
val := rand . Float64 ( )
_ , err := app . Append ( 0 , lbls , min , val )
if faceError {
require . Error ( t , err )
} else {
require . NoError ( t , err )
appendedSamples [ key ] = append ( appendedSamples [ key ] , sample { t : min , f : val } )
totalSamples ++
}
}
if faceError {
require . NoError ( t , app . Rollback ( ) )
} else {
require . NoError ( t , app . Commit ( ) )
}
}
testQuery := func ( from , to int64 ) {
querier , err := db . Querier ( from , to )
require . NoError ( t , err )
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar." ) )
for k , v := range appendedSamples {
sort . Slice ( v , func ( i , j int ) bool {
return v [ i ] . T ( ) < v [ j ] . T ( )
} )
appendedSamples [ k ] = v
}
expSamples := make ( map [ string ] [ ] chunks . Sample )
for k , samples := range appendedSamples {
for _ , s := range samples {
if s . T ( ) < from {
continue
}
if s . T ( ) > to {
continue
}
expSamples [ k ] = append ( expSamples [ k ] , s )
}
}
require . Equal ( t , expSamples , seriesSet )
require . Equal ( t , float64 ( totalSamples - 2 ) , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamplesAppended ) , "number of ooo appended samples mismatch" )
}
verifyOOOMinMaxTimes := func ( expMin , expMax int64 ) {
require . Equal ( t , minutes ( expMin ) , db . head . MinOOOTime ( ) )
require . Equal ( t , minutes ( expMax ) , db . head . MaxOOOTime ( ) )
}
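// With OutOfOrderTimeWindow = 4h and the in-order sample at minute 300
// setting the head's maxt, samples before minute 60 (300 - 240) should be
// rejected, while minute 60 itself is the accepted edge exercised below.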
// In-order samples.
addSample ( s1 , 300 , 300 , false )
addSample ( s2 , 290 , 290 , false )
require . Equal ( t , float64 ( 2 ) , prom_testutil . ToFloat64 ( db . head . metrics . chunksCreated ) )
testQuery ( math . MinInt64 , math . MaxInt64 )
// Some ooo samples.
addSample ( s1 , 250 , 260 , false )
addSample ( s2 , 255 , 265 , false )
verifyOOOMinMaxTimes ( 250 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
testQuery ( minutes ( 250 ) , minutes ( 265 ) ) // Test querying the ooo data time range
testQuery ( minutes ( 290 ) , minutes ( 300 ) ) // Test querying in-order data time range
testQuery ( minutes ( 250 ) , minutes ( 300 ) ) // Test querying the entire range
// Out of time window.
addSample ( s1 , 59 , 59 , true )
addSample ( s2 , 49 , 49 , true )
verifyOOOMinMaxTimes ( 250 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
// At the edge of the time window; it would also be "out of bound" without the ooo support.
addSample ( s1 , 60 , 65 , false )
verifyOOOMinMaxTimes ( 60 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
// This sample is not within the time window w.r.t. the head's maxt, but it is within the window
// w.r.t. the series' maxt. However, only the head's maxt is considered.
addSample ( s2 , 59 , 59 , true )
verifyOOOMinMaxTimes ( 60 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
// Now the sample is within time window w.r.t. the head's maxt.
addSample ( s2 , 60 , 65 , false )
verifyOOOMinMaxTimes ( 60 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
// Out of time window again.
addSample ( s1 , 59 , 59 , true )
addSample ( s2 , 49 , 49 , true )
testQuery ( math . MinInt64 , math . MaxInt64 )
// Generate some m-map chunks. The m-map chunks here are laid out such that,
// when sorted w.r.t. mint, the last chunk's maxt is not the overall maxt
// of the merged chunk. This tests a bug fixed in https://github.com/grafana/mimir-prometheus/pull/238/.
require . Equal ( t , float64 ( 4 ) , prom_testutil . ToFloat64 ( db . head . metrics . chunksCreated ) )
addSample ( s1 , 180 , 249 , false )
require . Equal ( t , float64 ( 6 ) , prom_testutil . ToFloat64 ( db . head . metrics . chunksCreated ) )
verifyOOOMinMaxTimes ( 60 , 265 )
testQuery ( math . MinInt64 , math . MaxInt64 )
}
func TestOOODisabled ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 0
db := openTestDB ( t , opts , nil )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
s1 := labels . FromStrings ( "foo" , "bar1" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
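// With OutOfOrderTimeWindow == 0, any sample at or before the head's maxt
// should be rejected and only show up in the out-of-order/out-of-bound
// counters verified at the end.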
expSamples := make ( map [ string ] [ ] chunks . Sample )
totalSamples := 0
failedSamples := 0
addSample := func ( lbls labels . Labels , fromMins , toMins int64 , faceError bool ) {
app := db . Appender ( context . Background ( ) )
key := lbls . String ( )
from , to := minutes ( fromMins ) , minutes ( toMins )
for min := from ; min <= to ; min += time . Minute . Milliseconds ( ) {
val := rand . Float64 ( )
_ , err := app . Append ( 0 , lbls , min , val )
if faceError {
require . Error ( t , err )
failedSamples ++
} else {
require . NoError ( t , err )
expSamples [ key ] = append ( expSamples [ key ] , sample { t : min , f : val } )
totalSamples ++
}
}
if faceError {
require . NoError ( t , app . Rollback ( ) )
} else {
require . NoError ( t , app . Commit ( ) )
}
}
addSample ( s1 , 300 , 300 , false ) // In-order samples.
addSample ( s1 , 250 , 260 , true ) // Some ooo samples.
addSample ( s1 , 59 , 59 , true ) // Out of time window.
addSample ( s1 , 60 , 65 , true ) // At the edge of the time window; it would also be "out of bound" without the ooo support.
addSample ( s1 , 59 , 59 , true ) // Out of time window again.
addSample ( s1 , 301 , 310 , false ) // More in-order samples.
querier , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar." ) )
require . Equal ( t , expSamples , seriesSet )
require . Equal ( t , float64 ( 0 ) , prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamplesAppended ) , "number of ooo appended samples mismatch" )
require . Equal ( t , float64 ( failedSamples ) ,
prom_testutil . ToFloat64 ( db . head . metrics . outOfOrderSamples . WithLabelValues ( sampleMetricTypeFloat ) ) + prom_testutil . ToFloat64 ( db . head . metrics . outOfBoundSamples . WithLabelValues ( sampleMetricTypeFloat ) ) ,
"number of ooo/oob samples mismatch" )
// Verifying that no OOO artifacts were generated.
_ , err = os . ReadDir ( path . Join ( db . Dir ( ) , wlog . WblDirName ) )
require . True ( t , os . IsNotExist ( err ) )
ms , created , err := db . head . getOrCreate ( s1 . Hash ( ) , s1 )
require . NoError ( t , err )
require . False ( t , created )
require . NotNil ( t , ms )
require . Nil ( t , ms . ooo )
}
func TestWBLAndMmapReplay ( t * testing . T ) {
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 4 * time . Hour . Milliseconds ( )
db := openTestDB ( t , opts , nil )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
s1 := labels . FromStrings ( "foo" , "bar1" )
minutes := func ( m int64 ) int64 { return m * time . Minute . Milliseconds ( ) }
expSamples := make ( map [ string ] [ ] chunks . Sample )
totalSamples := 0
addSample := func ( lbls labels . Labels , fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
key := lbls . String ( )
from , to := minutes ( fromMins ) , minutes ( toMins )
for min := from ; min <= to ; min += time . Minute . Milliseconds ( ) {
val := rand . Float64 ( )
_ , err := app . Append ( 0 , lbls , min , val )
require . NoError ( t , err )
expSamples [ key ] = append ( expSamples [ key ] , sample { t : min , f : val } )
totalSamples ++
}
require . NoError ( t , app . Commit ( ) )
}
testQuery := func ( exp map [ string ] [ ] chunks . Sample ) {
querier , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
seriesSet := query ( t , querier , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar." ) )
for k , v := range exp {
sort . Slice ( v , func ( i , j int ) bool {
return v [ i ] . T ( ) < v [ j ] . T ( )
} )
exp [ k ] = v
}
require . Equal ( t , exp , seriesSet )
}
// In-order samples.
addSample ( s1 , 300 , 300 )
require . Equal ( t , float64 ( 1 ) , prom_testutil . ToFloat64 ( db . head . metrics . chunksCreated ) )
// Some ooo samples.
addSample ( s1 , 250 , 260 )
addSample ( s1 , 195 , 249 ) // This creates some m-map chunks.
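// 11 samples at 250-260 plus 55 at 195-249 is 66 ooo samples; with
// OutOfOrderCapMax=30 the ooo head chunk should be cut twice, giving 3 ooo
// chunks on top of the single in-order chunk, hence 4 below.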
require . Equal ( t , float64 ( 4 ) , prom_testutil . ToFloat64 ( db . head . metrics . chunksCreated ) )
testQuery ( expSamples )
oooMint , oooMaxt := minutes ( 195 ) , minutes ( 260 )
// Collect the samples only present in the ooo m-map chunks.
ms , created , err := db . head . getOrCreate ( s1 . Hash ( ) , s1 )
require . False ( t , created )
require . NoError ( t , err )
var s1MmapSamples [ ] chunks . Sample
for _ , mc := range ms . ooo . oooMmappedChunks {
chk , err := db . head . chunkDiskMapper . Chunk ( mc . ref )
require . NoError ( t , err )
it := chk . Iterator ( nil )
for it . Next ( ) == chunkenc . ValFloat {
ts , val := it . At ( )
s1MmapSamples = append ( s1MmapSamples , sample { t : ts , f : val } )
}
}
require . NotEmpty ( t , s1MmapSamples )
require . NoError ( t , db . Close ( ) )
// Make a copy of the original state of the WBL and m-map files to use later.
mmapDir := mmappedChunksDir ( db . head . opts . ChunkDirRoot )
wblDir := db . head . wbl . Dir ( )
originalWblDir := filepath . Join ( t . TempDir ( ) , "original_wbl" )
originalMmapDir := filepath . Join ( t . TempDir ( ) , "original_mmap" )
require . NoError ( t , fileutil . CopyDirs ( wblDir , originalWblDir ) )
require . NoError ( t , fileutil . CopyDirs ( mmapDir , originalMmapDir ) )
resetWBLToOriginal := func ( ) {
require . NoError ( t , os . RemoveAll ( wblDir ) )
require . NoError ( t , fileutil . CopyDirs ( originalWblDir , wblDir ) )
}
resetMmapToOriginal := func ( ) {
require . NoError ( t , os . RemoveAll ( mmapDir ) )
require . NoError ( t , fileutil . CopyDirs ( originalMmapDir , mmapDir ) )
}
t . Run ( "Restart DB with both WBL and M-map files for ooo data" , func ( t * testing . T ) {
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
testQuery ( expSamples )
require . NoError ( t , db . Close ( ) )
} )
t . Run ( "Restart DB with only WBL for ooo data" , func ( t * testing . T ) {
require . NoError ( t , os . RemoveAll ( mmapDir ) )
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
testQuery ( expSamples )
require . NoError ( t , db . Close ( ) )
} )
t . Run ( "Restart DB with only M-map files for ooo data" , func ( t * testing . T ) {
require . NoError ( t , os . RemoveAll ( wblDir ) )
resetMmapToOriginal ( )
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
inOrderSample := expSamples [ s1 . String ( ) ] [ len ( expSamples [ s1 . String ( ) ] ) - 1 ]
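// With the WBL gone, ooo samples that only lived in the ooo head chunk are
// expected to be lost; what remains queryable is the m-mapped ooo data plus
// the in-order sample replayed from the WAL.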
testQuery ( map [ string ] [ ] chunks . Sample {
s1 . String ( ) : append ( s1MmapSamples , inOrderSample ) ,
} )
require . NoError ( t , db . Close ( ) )
} )
t . Run ( "Restart DB with WBL+Mmap while increasing the OOOCapMax" , func ( t * testing . T ) {
resetWBLToOriginal ( )
resetMmapToOriginal ( )
opts . OutOfOrderCapMax = 60
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
testQuery ( expSamples )
require . NoError ( t , db . Close ( ) )
} )
t . Run ( "Restart DB with WBL+Mmap while decreasing the OOOCapMax" , func ( t * testing . T ) {
resetMmapToOriginal ( ) // We need to reset because new duplicate chunks can be written above.
opts . OutOfOrderCapMax = 10
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
testQuery ( expSamples )
require . NoError ( t , db . Close ( ) )
} )
t . Run ( "Restart DB with WBL+Mmap while having no m-map markers in WBL" , func ( t * testing . T ) {
resetMmapToOriginal ( ) // We need to reset because new duplicate chunks can be written above.
// Removing m-map markers in WBL by rewriting it.
newWbl , err := wlog . New ( log . NewNopLogger ( ) , nil , filepath . Join ( t . TempDir ( ) , "new_wbl" ) , wlog . CompressionNone )
require . NoError ( t , err )
sr , err := wlog . NewSegmentsReader ( originalWblDir )
require . NoError ( t , err )
var dec record . Decoder
r , markers , addedRecs := wlog . NewReader ( sr ) , 0 , 0
for r . Next ( ) {
rec := r . Record ( )
if dec . Type ( rec ) == record . MmapMarkers {
markers ++
continue
}
addedRecs ++
require . NoError ( t , newWbl . Log ( rec ) )
}
require . Greater ( t , markers , 0 )
require . Greater ( t , addedRecs , 0 )
require . NoError ( t , newWbl . Close ( ) )
require . NoError ( t , sr . Close ( ) )
require . NoError ( t , os . RemoveAll ( wblDir ) )
require . NoError ( t , os . Rename ( newWbl . Dir ( ) , wblDir ) )
opts . OutOfOrderCapMax = 30
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , oooMint , db . head . MinOOOTime ( ) )
require . Equal ( t , oooMaxt , db . head . MaxOOOTime ( ) )
testQuery ( expSamples )
} )
}
func TestOOOCompactionFailure ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( ) // We want to manually call it.
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
addSample := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSample ( 250 , 350 )
// Add ooo samples that create multiple chunks.
addSample ( 90 , 310 )
// No blocks before compaction.
require . Empty ( t , db . Blocks ( ) )
// There is a 0th WBL file.
verifyFirstWBLFileIs0 := func ( count int ) {
require . NoError ( t , db . head . wbl . Sync ( ) ) // Syncing to make sure the WBL is flushed on Windows.
files , err := os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , count )
require . Equal ( t , "00000000" , files [ 0 ] . Name ( ) )
f , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f . Size ( ) , int64 ( 100 ) )
}
verifyFirstWBLFileIs0 ( 1 )
verifyMmapFiles := func ( exp ... string ) {
mmapDir := mmappedChunksDir ( db . head . opts . ChunkDirRoot )
files , err := os . ReadDir ( mmapDir )
require . NoError ( t , err )
require . Len ( t , files , len ( exp ) )
for i , f := range files {
require . Equal ( t , exp [ i ] , f . Name ( ) )
}
}
verifyMmapFiles ( "000001" )
// OOO compaction fails 5 times.
originalCompactor := db . compactor
db . compactor = & mockCompactorFailing { t : t }
for i := 0 ; i < 5 ; i ++ {
require . Error ( t , db . CompactOOOHead ( ctx ) )
}
require . Empty ( t , db . Blocks ( ) )
// M-map files don't change after failed compaction.
verifyMmapFiles ( "000001" )
// Because of 5 compaction attempts, there are 6 files now.
verifyFirstWBLFileIs0 ( 6 )
db . compactor = originalCompactor
require . NoError ( t , db . CompactOOOHead ( ctx ) )
oldBlocks := db . Blocks ( )
require . Len ( t , db . Blocks ( ) , 3 )
// Check that the ooo chunks were removed.
ms , created , err := db . head . getOrCreate ( series1 . Hash ( ) , series1 )
require . NoError ( t , err )
require . False ( t , created )
require . Nil ( t , ms . ooo )
// The failed compaction should not have left the ooo Head corrupted.
// Hence, expect no new blocks with another OOO compaction call.
require . NoError ( t , db . CompactOOOHead ( ctx ) )
require . Len ( t , db . Blocks ( ) , 3 )
require . Equal ( t , oldBlocks , db . Blocks ( ) )
// There should be a single m-map file
verifyMmapFiles ( "000001" )
// All but last WBL file will be deleted.
// 8 files in total (starting at 0) because of 7 compaction calls.
files , err := os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 1 )
require . Equal ( t , "00000007" , files [ 0 ] . Name ( ) )
f , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Equal ( t , int64 ( 0 ) , f . Size ( ) )
verifySamples := func ( block * Block , fromMins , toMins int64 ) {
series1Samples := make ( [ ] chunks . Sample , 0 , toMins - fromMins + 1 )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
series1Samples = append ( series1Samples , sample { ts , float64 ( ts ) , nil , nil } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : series1Samples ,
}
q , err := NewBlockQuerier ( block , math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
// Checking for expected data in the blocks.
verifySamples ( db . Blocks ( ) [ 0 ] , 90 , 119 )
verifySamples ( db . Blocks ( ) [ 1 ] , 120 , 239 )
verifySamples ( db . Blocks ( ) [ 2 ] , 240 , 310 )
// Compact the in-order head and expect another block.
// Since this is a forced compaction, this block is not aligned with 2h.
err = db . CompactHead ( NewRangeHead ( db . head , 250 * time . Minute . Milliseconds ( ) , 350 * time . Minute . Milliseconds ( ) ) )
require . NoError ( t , err )
require . Len ( t , db . Blocks ( ) , 4 ) // [0, 120), [120, 240), [240, 360), [250, 351)
verifySamples ( db . Blocks ( ) [ 3 ] , 250 , 350 )
// The compaction also clears out the old m-map files. Including
// the file that has ooo chunks.
verifyMmapFiles ( "000001" )
}
func TestWBLCorruption ( t * testing . T ) {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 30
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
var allSamples , expAfterRestart [ ] chunks . Sample
addSamples := func ( fromMins , toMins int64 , afterRestart bool ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
if afterRestart {
expAfterRestart = append ( expAfterRestart , sample { t : ts , f : float64 ( ts ) } )
}
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSamples ( 340 , 350 , true )
// OOO samples.
addSamples ( 90 , 99 , true )
addSamples ( 100 , 119 , true )
addSamples ( 120 , 130 , true )
// Moving on to the second segment file.
_ , err = db . head . wbl . NextSegment ( )
require . NoError ( t , err )
// More OOO samples.
addSamples ( 200 , 230 , true )
addSamples ( 240 , 255 , true )
// We corrupt WBL after the sample at 255. So everything added later
// should be deleted after replay.
// Checking where we corrupt it.
require . NoError ( t , db . head . wbl . Sync ( ) ) // Syncing to make sure the WBL is flushed on Windows.
files , err := os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 2 )
f1 , err := files [ 1 ] . Info ( )
require . NoError ( t , err )
corruptIndex := f1 . Size ( )
corruptFilePath := path . Join ( db . head . wbl . Dir ( ) , files [ 1 ] . Name ( ) )
// Corrupt the WBL by adding a malformed record.
require . NoError ( t , db . head . wbl . Log ( [ ] byte { byte ( record . Samples ) , 99 , 9 , 99 , 9 , 99 , 9 , 99 } ) )
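// The record above carries a valid Samples record type byte followed by
// garbage, so replay should fail to decode it and the repair should truncate
// the segment at corruptIndex, which is verified after the restart below.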
// More samples after the corruption point.
addSamples ( 260 , 280 , false )
addSamples ( 290 , 300 , false )
// Another file.
_ , err = db . head . wbl . NextSegment ( )
require . NoError ( t , err )
addSamples ( 310 , 320 , false )
// Verifying that we have data after corruption point.
require . NoError ( t , db . head . wbl . Sync ( ) ) // Syncing to make sure the WBL is flushed on Windows.
files , err = os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 3 )
f1 , err = files [ 1 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f1 . Size ( ) , corruptIndex )
f0 , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f0 . Size ( ) , int64 ( 100 ) )
f2 , err := files [ 2 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f2 . Size ( ) , int64 ( 100 ) )
verifySamples := func ( expSamples [ ] chunks . Sample ) {
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : expSamples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
verifySamples ( allSamples )
require . NoError ( t , db . Close ( ) )
// We want everything to be replayed from the WBL. So we delete the m-map files.
require . NoError ( t , os . RemoveAll ( mmappedChunksDir ( db . head . opts . ChunkDirRoot ) ) )
// Restart does the replay and repair.
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . walCorruptionsTotal ) )
require . Less ( t , len ( expAfterRestart ) , len ( allSamples ) )
verifySamples ( expAfterRestart )
// Verify that it did the repair on disk.
files , err = os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 3 )
f0 , err = files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f0 . Size ( ) , int64 ( 100 ) )
f2 , err = files [ 2 ] . Info ( )
require . NoError ( t , err )
require . Equal ( t , int64 ( 0 ) , f2 . Size ( ) )
require . Equal ( t , corruptFilePath , path . Join ( db . head . wbl . Dir ( ) , files [ 1 ] . Name ( ) ) )
// Verifying that everything after the corruption point is set to 0.
b , err := os . ReadFile ( corruptFilePath )
require . NoError ( t , err )
sum := 0
for _ , val := range b [ corruptIndex : ] {
sum += int ( val )
}
require . Equal ( t , 0 , sum )
// Another restart, everything normal with no repair.
require . NoError ( t , db . Close ( ) )
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( db . head . metrics . walCorruptionsTotal ) )
verifySamples ( expAfterRestart )
}
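// TestOOOMmapCorruption corrupts one of the m-mapped OOO chunk files on disk
// and verifies that replay drops all m-map files from the corruption point
// onwards, that the lost samples can later be restored from the OOO WBL, and
// that a subsequent restart needs no further repair.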
func TestOOOMmapCorruption ( t * testing . T ) {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderCapMax = 10
opts . OutOfOrderTimeWindow = 300 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
var allSamples , expInMmapChunks [ ] chunks . Sample
addSamples := func ( fromMins , toMins int64 , inMmapAfterCorruption bool ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
if inMmapAfterCorruption {
expInMmapChunks = append ( expInMmapChunks , sample { t : ts , f : float64 ( ts ) } )
}
}
require . NoError ( t , app . Commit ( ) )
}
// Add in-order samples.
addSamples ( 340 , 350 , true )
// OOO samples.
addSamples ( 90 , 99 , true )
addSamples ( 100 , 109 , true )
// This sample m-maps the previous chunk, while 120 itself goes into a new chunk.
addSamples ( 120 , 120 , false )
// Cut a second m-map file, which we will later corrupt. Sample 120's chunk goes into this new file.
db . head . chunkDiskMapper . CutNewFile ( )
// More OOO samples.
addSamples ( 200 , 230 , false )
addSamples ( 240 , 255 , false )
db . head . chunkDiskMapper . CutNewFile ( )
addSamples ( 260 , 290 , false )
verifySamples := func ( expSamples [ ] chunks . Sample ) {
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : expSamples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
verifySamples ( allSamples )
// Verifying existing files.
mmapDir := mmappedChunksDir ( db . head . opts . ChunkDirRoot )
files , err := os . ReadDir ( mmapDir )
require . NoError ( t , err )
require . Len ( t , files , 3 )
// Corrupt the 2nd file by overwriting some bytes in it.
f , err := os . OpenFile ( path . Join ( mmapDir , files [ 1 ] . Name ( ) ) , os . O_RDWR , 0o666 )
require . NoError ( t , err )
_ , err = f . WriteAt ( [ ] byte { 99 , 9 , 99 , 9 , 99 } , 20 )
require . NoError ( t , err )
require . NoError ( t , f . Close ( ) )
firstFileName := files [ 0 ] . Name ( )
require . NoError ( t , db . Close ( ) )
// Move the OOO WBL aside so we can use it later.
wblDir := db . head . wbl . Dir ( )
wblDirTmp := path . Join ( t . TempDir ( ) , "wbl_tmp" )
require . NoError ( t , os . Rename ( wblDir , wblDirTmp ) )
// Restart does the replay and repair of m-map files.
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , 1.0 , prom_testutil . ToFloat64 ( db . head . metrics . mmapChunkCorruptionTotal ) )
require . Less ( t , len ( expInMmapChunks ) , len ( allSamples ) )
// Since there is no WBL, only samples from the m-map chunks come back in the query.
verifySamples ( expInMmapChunks )
// Verify that it did the repair on disk. All files from the point of corruption
// should be deleted.
files , err = os . ReadDir ( mmapDir )
require . NoError ( t , err )
require . Len ( t , files , 1 )
f0 , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Greater ( t , f0 . Size ( ) , int64 ( 100 ) )
require . Equal ( t , firstFileName , files [ 0 ] . Name ( ) )
// Another restart, everything normal with no repair.
require . NoError ( t , db . Close ( ) )
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
require . Equal ( t , 0.0 , prom_testutil . ToFloat64 ( db . head . metrics . mmapChunkCorruptionTotal ) )
verifySamples ( expInMmapChunks )
// Restart again with the WBL, all samples should be present now.
require . NoError ( t , db . Close ( ) )
require . NoError ( t , os . RemoveAll ( wblDir ) )
require . NoError ( t , os . Rename ( wblDirTmp , wblDir ) )
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
verifySamples ( allSamples )
}
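// TestOutOfOrderRuntimeConfig exercises changing the OOO time window at
// runtime via ApplyConfig: increasing it, decreasing it, and toggling it
// between enabled and disabled. The WBL must be created lazily when OOO is
// first enabled and must be reused (same pointer) across config changes.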
func TestOutOfOrderRuntimeConfig ( t * testing . T ) {
ctx := context . Background ( )
getDB := func ( oooTimeWindow int64 ) * DB {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = oooTimeWindow
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
return db
}
makeConfig := func ( oooTimeWindow int ) * config . Config {
return & config . Config {
StorageConfig : config . StorageConfig {
TSDBConfig : & config . TSDBConfig {
OutOfOrderTimeWindow : int64 ( oooTimeWindow ) * time . Minute . Milliseconds ( ) ,
} ,
} ,
}
}
series1 := labels . FromStrings ( "foo" , "bar1" )
addSamples := func ( t * testing . T , db * DB , fromMins , toMins int64 , success bool , allSamples [ ] chunks . Sample ) [ ] chunks . Sample {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
if success {
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
} else {
require . Error ( t , err )
}
}
require . NoError ( t , app . Commit ( ) )
return allSamples
}
verifySamples := func ( t * testing . T , db * DB , expSamples [ ] chunks . Sample ) {
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : expSamples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
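// doOOOCompaction compacts the OOO head into a block and checks that the
// WBL is truncated to zero size afterwards.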
doOOOCompaction := func ( t * testing . T , db * DB ) {
// WBL is not empty.
size , err := db . head . wbl . Size ( )
require . NoError ( t , err )
require . Greater ( t , size , int64 ( 0 ) )
require . Empty ( t , db . Blocks ( ) )
require . NoError ( t , db . compactOOOHead ( ctx ) )
require . NotEmpty ( t , db . Blocks ( ) )
// WBL is empty.
size , err = db . head . wbl . Size ( )
require . NoError ( t , err )
require . Equal ( t , int64 ( 0 ) , size )
}
t . Run ( "increase time window" , func ( t * testing . T ) {
var allSamples [ ] chunks . Sample
db := getDB ( 30 * time . Minute . Milliseconds ( ) )
// In-order.
allSamples = addSamples ( t , db , 300 , 310 , true , allSamples )
// OOO samples up to 30m old succeed.
allSamples = addSamples ( t , db , 281 , 290 , true , allSamples )
// OOO samples 59m old fail.
s := addSamples ( t , db , 251 , 260 , false , nil )
require . Empty ( t , s )
verifySamples ( t , db , allSamples )
oldWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
// Increase time window and try adding again.
err := db . ApplyConfig ( makeConfig ( 60 ) )
require . NoError ( t , err )
allSamples = addSamples ( t , db , 251 , 260 , true , allSamples )
// WBL does not change.
newWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
require . Equal ( t , oldWblPtr , newWblPtr )
doOOOCompaction ( t , db )
verifySamples ( t , db , allSamples )
} )
t . Run ( "decrease time window and increase again" , func ( t * testing . T ) {
var allSamples [ ] chunks . Sample
db := getDB ( 60 * time . Minute . Milliseconds ( ) )
// In-order.
allSamples = addSamples ( t , db , 300 , 310 , true , allSamples )
// OOO samples up to 59m old succeed.
allSamples = addSamples ( t , db , 251 , 260 , true , allSamples )
oldWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
// Decrease time window.
err := db . ApplyConfig ( makeConfig ( 30 ) )
require . NoError ( t , err )
// OOO samples 49m old fail.
s := addSamples ( t , db , 261 , 270 , false , nil )
require . Empty ( t , s )
// WBL does not change.
newWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
require . Equal ( t , oldWblPtr , newWblPtr )
verifySamples ( t , db , allSamples )
// Increase the time window again and check.
err = db . ApplyConfig ( makeConfig ( 60 ) )
require . NoError ( t , err )
allSamples = addSamples ( t , db , 261 , 270 , true , allSamples )
verifySamples ( t , db , allSamples )
// WBL does not change.
newWblPtr = fmt . Sprintf ( "%p" , db . head . wbl )
require . Equal ( t , oldWblPtr , newWblPtr )
doOOOCompaction ( t , db )
verifySamples ( t , db , allSamples )
} )
t . Run ( "disabled to enabled" , func ( t * testing . T ) {
var allSamples [ ] chunks . Sample
db := getDB ( 0 )
// In-order.
allSamples = addSamples ( t , db , 300 , 310 , true , allSamples )
// OOO fails.
s := addSamples ( t , db , 251 , 260 , false , nil )
require . Empty ( t , s )
verifySamples ( t , db , allSamples )
require . Nil ( t , db . head . wbl )
// Increase time window and try adding again.
err := db . ApplyConfig ( makeConfig ( 60 ) )
require . NoError ( t , err )
allSamples = addSamples ( t , db , 251 , 260 , true , allSamples )
// WBL gets created.
require . NotNil ( t , db . head . wbl )
verifySamples ( t , db , allSamples )
// OOO compaction works now.
doOOOCompaction ( t , db )
verifySamples ( t , db , allSamples )
} )
t . Run ( "enabled to disabled" , func ( t * testing . T ) {
var allSamples [ ] chunks . Sample
db := getDB ( 60 * time . Minute . Milliseconds ( ) )
// In-order.
allSamples = addSamples ( t , db , 300 , 310 , true , allSamples )
// OOO samples up to 59m old succeed.
allSamples = addSamples ( t , db , 251 , 260 , true , allSamples )
oldWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
// Set the time window to 0, disabling OOO ingestion.
err := db . ApplyConfig ( makeConfig ( 0 ) )
require . NoError ( t , err )
// OOO within old time window fails.
s := addSamples ( t , db , 290 , 309 , false , nil )
require . Empty ( t , s )
// WBL does not change and is not removed.
newWblPtr := fmt . Sprintf ( "%p" , db . head . wbl )
require . Equal ( t , oldWblPtr , newWblPtr )
verifySamples ( t , db , allSamples )
// Compaction still works after disabling with WBL cleanup.
doOOOCompaction ( t , db )
verifySamples ( t , db , allSamples )
} )
t . Run ( "disabled to disabled" , func ( t * testing . T ) {
var allSamples [ ] chunks . Sample
db := getDB ( 0 )
// In-order.
allSamples = addSamples ( t , db , 300 , 310 , true , allSamples )
// OOO fails.
s := addSamples ( t , db , 290 , 309 , false , nil )
require . Empty ( t , s )
verifySamples ( t , db , allSamples )
require . Nil ( t , db . head . wbl )
// Time window to 0.
err := db . ApplyConfig ( makeConfig ( 0 ) )
require . NoError ( t , err )
// OOO still fails.
s = addSamples ( t , db , 290 , 309 , false , nil )
require . Empty ( t , s )
verifySamples ( t , db , allSamples )
require . Nil ( t , db . head . wbl )
} )
}
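// TestNoGapAfterRestartWithOOO ensures that after compacting a mix of
// in-order and OOO data into blocks, the block ranges and the head min/max
// times leave no gap in the queryable data, including across a restart.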
func TestNoGapAfterRestartWithOOO ( t * testing . T ) {
series1 := labels . FromStrings ( "foo" , "bar1" )
addSamples := func ( t * testing . T , db * DB , fromMins , toMins int64 , success bool ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
if success {
require . NoError ( t , err )
} else {
require . Error ( t , err )
}
}
require . NoError ( t , app . Commit ( ) )
}
verifySamples := func ( t * testing . T , db * DB , fromMins , toMins int64 ) {
var expSamples [ ] chunks . Sample
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
expSamples = append ( expSamples , sample { t : ts , f : float64 ( ts ) } )
}
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : expSamples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
cases := [ ] struct {
inOrderMint , inOrderMaxt int64
oooMint , oooMaxt int64
// After compaction.
blockRanges [ ] [ 2 ] int64
headMint , headMaxt int64
} {
{
300 , 490 ,
489 , 489 ,
[ ] [ 2 ] int64 { { 300 , 360 } , { 480 , 600 } } ,
360 , 490 ,
} ,
{
300 , 490 ,
479 , 479 ,
[ ] [ 2 ] int64 { { 300 , 360 } , { 360 , 480 } } ,
360 , 490 ,
} ,
}
for i , c := range cases {
t . Run ( fmt . Sprintf ( "case=%d" , i ) , func ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 30 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
// 3h10m = 190m worth of in-order data.
addSamples ( t , db , c . inOrderMint , c . inOrderMaxt , true )
verifySamples ( t , db , c . inOrderMint , c . inOrderMaxt )
// One OOO sample.
addSamples ( t , db , c . oooMint , c . oooMaxt , true )
verifySamples ( t , db , c . inOrderMint , c . inOrderMaxt )
// We get 2 blocks: 1 from OOO data, 1 from in-order data.
require . NoError ( t , db . Compact ( ctx ) )
verifyBlockRanges := func ( ) {
blocks := db . Blocks ( )
require . Equal ( t , len ( c . blockRanges ) , len ( blocks ) )
for j , br := range c . blockRanges {
require . Equal ( t , br [ 0 ] * time . Minute . Milliseconds ( ) , blocks [ j ] . MinTime ( ) )
require . Equal ( t , br [ 1 ] * time . Minute . Milliseconds ( ) , blocks [ j ] . MaxTime ( ) )
}
}
verifyBlockRanges ( )
require . Equal ( t , c . headMint * time . Minute . Milliseconds ( ) , db . head . MinTime ( ) )
require . Equal ( t , c . headMaxt * time . Minute . Milliseconds ( ) , db . head . MaxTime ( ) )
// Restart and expect all samples to be present.
require . NoError ( t , db . Close ( ) )
db , err = Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
verifyBlockRanges ( )
require . Equal ( t , c . headMint * time . Minute . Milliseconds ( ) , db . head . MinTime ( ) )
require . Equal ( t , c . headMaxt * time . Minute . Milliseconds ( ) , db . head . MaxTime ( ) )
verifySamples ( t , db , c . inOrderMint , c . inOrderMaxt )
} )
}
}
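// TestWblReplayAfterOOODisableAndRestart verifies that OOO samples already
// recorded in the WBL remain queryable after the DB is restarted with the
// OOO time window set to 0.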
func TestWblReplayAfterOOODisableAndRestart ( t * testing . T ) {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 60 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
var allSamples [ ] chunks . Sample
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
}
require . NoError ( t , app . Commit ( ) )
}
// In-order samples.
addSamples ( 290 , 300 )
// OOO samples.
addSamples ( 250 , 260 )
verifySamples := func ( expSamples [ ] chunks . Sample ) {
sort . Slice ( expSamples , func ( i , j int ) bool {
return expSamples [ i ] . T ( ) < expSamples [ j ] . T ( )
} )
expRes := map [ string ] [ ] chunks . Sample {
series1 . String ( ) : expSamples ,
}
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
actRes := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , "foo" , "bar.*" ) )
require . Equal ( t , expRes , actRes )
}
verifySamples ( allSamples )
// Restart DB with OOO disabled.
require . NoError ( t , db . Close ( ) )
opts . OutOfOrderTimeWindow = 0
db , err = Open ( db . dir , nil , nil , opts , nil )
require . NoError ( t , err )
// We can still query OOO samples when OOO is disabled.
verifySamples ( allSamples )
}
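// TestPanicOnApplyConfig is a regression test: enabling OOO via ApplyConfig
// on a DB that was started with OOO disabled must not panic.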
func TestPanicOnApplyConfig ( t * testing . T ) {
dir := t . TempDir ( )
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 60 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
var allSamples [ ] chunks . Sample
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
}
require . NoError ( t , app . Commit ( ) )
}
// In-order samples.
addSamples ( 290 , 300 )
// OOO samples.
addSamples ( 250 , 260 )
// Restart DB with OOO disabled.
require . NoError ( t , db . Close ( ) )
opts . OutOfOrderTimeWindow = 0
db , err = Open ( db . dir , nil , prometheus . NewRegistry ( ) , opts , nil )
require . NoError ( t , err )
// Call ApplyConfig with OOO enabled and expect no panic.
err = db . ApplyConfig ( & config . Config {
StorageConfig : config . StorageConfig {
TSDBConfig : & config . TSDBConfig {
OutOfOrderTimeWindow : 60 * time . Minute . Milliseconds ( ) ,
} ,
} ,
} )
require . NoError ( t , err )
}
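// TestDiskFillingUpAfterDisablingOOO verifies that m-map chunk files and the
// WBL left over from OOO ingestion are cleaned up by compaction even after
// OOO has been disabled, so they do not keep filling up the disk.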
func TestDiskFillingUpAfterDisablingOOO ( t * testing . T ) {
dir := t . TempDir ( )
ctx := context . Background ( )
opts := DefaultOptions ( )
opts . OutOfOrderTimeWindow = 60 * time . Minute . Milliseconds ( )
db , err := Open ( dir , nil , nil , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
series1 := labels . FromStrings ( "foo" , "bar1" )
var allSamples [ ] chunks . Sample
addSamples := func ( fromMins , toMins int64 ) {
app := db . Appender ( context . Background ( ) )
for min := fromMins ; min <= toMins ; min ++ {
ts := min * time . Minute . Milliseconds ( )
_ , err := app . Append ( 0 , series1 , ts , float64 ( ts ) )
require . NoError ( t , err )
allSamples = append ( allSamples , sample { t : ts , f : float64 ( ts ) } )
}
require . NoError ( t , app . Commit ( ) )
}
// In-order samples.
addSamples ( 290 , 300 )
// OOO samples.
addSamples ( 250 , 299 )
// Restart DB with OOO disabled.
require . NoError ( t , db . Close ( ) )
opts . OutOfOrderTimeWindow = 0
db , err = Open ( db . dir , nil , prometheus . NewRegistry ( ) , opts , nil )
require . NoError ( t , err )
db . DisableCompactions ( )
ms := db . head . series . getByHash ( series1 . Hash ( ) , series1 )
require . NotEmpty ( t , ms . ooo . oooMmappedChunks , "OOO mmap chunk was not replayed" )
checkMmapFileContents := func ( contains , notContains [ ] string ) {
mmapDir := mmappedChunksDir ( db . head . opts . ChunkDirRoot )
files , err := os . ReadDir ( mmapDir )
require . NoError ( t , err )
fnames := make ( [ ] string , 0 , len ( files ) )
for _ , f := range files {
fnames = append ( fnames , f . Name ( ) )
}
for _ , f := range contains {
require . Contains ( t , fnames , f )
}
for _ , f := range notContains {
require . NotContains ( t , fnames , f )
}
}
// Add in-order samples until the head is ready for compaction.
addSamples ( 301 , 500 )
// Check that m-map files get deleted properly after compactions.
db . head . mmapHeadChunks ( )
checkMmapFileContents ( [ ] string { "000001" , "000002" } , nil )
require . NoError ( t , db . Compact ( ctx ) )
checkMmapFileContents ( [ ] string { "000002" } , [ ] string { "000001" } )
require . Nil ( t , ms . ooo , "OOO mmap chunk was not compacted" )
addSamples ( 501 , 650 )
db . head . mmapHeadChunks ( )
checkMmapFileContents ( [ ] string { "000002" , "000003" } , [ ] string { "000001" } )
require . NoError ( t , db . Compact ( ctx ) )
checkMmapFileContents ( nil , [ ] string { "000001" , "000002" , "000003" } )
// Verify that WBL is empty.
files , err := os . ReadDir ( db . head . wbl . Dir ( ) )
require . NoError ( t , err )
require . Len ( t , files , 1 ) // Last empty file after compaction.
finfo , err := files [ 0 ] . Info ( )
require . NoError ( t , err )
require . Equal ( t , int64 ( 0 ) , finfo . Size ( ) )
}
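// TestHistogramAppendAndQuery appends native histograms (also mixed with
// float samples) and verifies that queries return the expected data,
// including after chunks are re-encoded when the bucket layout changes.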
func TestHistogramAppendAndQuery ( t * testing . T ) {
t . Run ( "integer histograms" , func ( t * testing . T ) {
testHistogramAppendAndQueryHelper ( t , false )
} )
t . Run ( "float histograms" , func ( t * testing . T ) {
testHistogramAppendAndQueryHelper ( t , true )
} )
}
func testHistogramAppendAndQueryHelper ( t * testing . T , floatHistogram bool ) {
t . Helper ( )
db := openTestDB ( t , nil , nil )
minute := func ( m int ) int64 { return int64 ( m ) * time . Minute . Milliseconds ( ) }
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
ctx := context . Background ( )
appendHistogram := func (
lbls labels . Labels , tsMinute int , h * histogram . Histogram ,
exp * [ ] chunks . Sample , expCRH histogram . CounterResetHint ,
) {
t . Helper ( )
var err error
app := db . Appender ( ctx )
if floatHistogram {
_ , err = app . AppendHistogram ( 0 , lbls , minute ( tsMinute ) , nil , h . ToFloat ( nil ) )
efh := h . ToFloat ( nil )
efh . CounterResetHint = expCRH
* exp = append ( * exp , sample { t : minute ( tsMinute ) , fh : efh } )
} else {
_ , err = app . AppendHistogram ( 0 , lbls , minute ( tsMinute ) , h . Copy ( ) , nil )
eh := h . Copy ( )
eh . CounterResetHint = expCRH
* exp = append ( * exp , sample { t : minute ( tsMinute ) , h : eh } )
}
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
}
appendFloat := func ( lbls labels . Labels , tsMinute int , val float64 , exp * [ ] chunks . Sample ) {
t . Helper ( )
app := db . Appender ( ctx )
_ , err := app . Append ( 0 , lbls , minute ( tsMinute ) , val )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
* exp = append ( * exp , sample { t : minute ( tsMinute ) , f : val } )
}
testQuery := func ( name , value string , exp map [ string ] [ ] chunks . Sample ) {
t . Helper ( )
q , err := db . Querier ( math . MinInt64 , math . MaxInt64 )
require . NoError ( t , err )
act := query ( t , q , labels . MustNewMatcher ( labels . MatchRegexp , name , value ) )
require . Equal ( t , exp , act )
}
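// Note that integer histograms store bucket counts delta-encoded: below,
// PositiveBuckets {1, 1, -1, 0} encode absolute counts {1, 2, 1, 1} and
// NegativeBuckets {1, 2, -1} encode {1, 3, 2}, which together with
// ZeroCount 4 add up to Count 15.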
baseH := & histogram . Histogram {
Count : 15 ,
ZeroCount : 4 ,
ZeroThreshold : 0.001 ,
Sum : 35.5 ,
Schema : 1 ,
PositiveSpans : [ ] histogram . Span {
{ Offset : 0 , Length : 2 } ,
{ Offset : 2 , Length : 2 } ,
} ,
PositiveBuckets : [ ] int64 { 1 , 1 , - 1 , 0 } ,
NegativeSpans : [ ] histogram . Span {
{ Offset : 0 , Length : 1 } ,
{ Offset : 1 , Length : 2 } ,
} ,
NegativeBuckets : [ ] int64 { 1 , 2 , - 1 } ,
}
var (
series1 = labels . FromStrings ( "foo" , "bar1" )
series2 = labels . FromStrings ( "foo" , "bar2" )
series3 = labels . FromStrings ( "foo" , "bar3" )
series4 = labels . FromStrings ( "foo" , "bar4" )
exp1 , exp2 , exp3 , exp4 [ ] chunks . Sample
)
// TODO(codesome): test everything for negative buckets as well.
t . Run ( "series with only histograms" , func ( t * testing . T ) {
h := baseH . Copy ( ) // This is shared across all sub tests.
appendHistogram ( series1 , 100 , h , & exp1 , histogram . UnknownCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
h . PositiveBuckets [ 0 ] ++
h . NegativeBuckets [ 0 ] += 2
h . Count += 10
appendHistogram ( series1 , 101 , h , & exp1 , histogram . NotCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
t . Run ( "changing schema" , func ( t * testing . T ) {
h . Schema = 2
appendHistogram ( series1 , 102 , h , & exp1 , histogram . UnknownCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
// Schema back to old.
h . Schema = 1
appendHistogram ( series1 , 103 , h , & exp1 , histogram . UnknownCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
} )
t . Run ( "new buckets incoming" , func ( t * testing . T ) {
// In the previous unit test, during the last histogram append, we
// changed the schema and that caused a new chunk creation. Because
// of the next append the layout of the last histogram will change
// because the chunk will be re-encoded. So this forces us to modify
// the last histogram in exp1 so when we query we get the expected
// results.
if floatHistogram {
lh := exp1 [ len ( exp1 ) - 1 ] . FH ( ) . Copy ( )
lh . PositiveSpans [ 1 ] . Length ++
lh . PositiveBuckets = append ( lh . PositiveBuckets , 0 )
exp1 [ len ( exp1 ) - 1 ] = sample { t : exp1 [ len ( exp1 ) - 1 ] . T ( ) , fh : lh }
} else {
lh := exp1 [ len ( exp1 ) - 1 ] . H ( ) . Copy ( )
lh . PositiveSpans [ 1 ] . Length ++
lh . PositiveBuckets = append ( lh . PositiveBuckets , - 2 ) // -2 makes the last bucket 0.
exp1 [ len ( exp1 ) - 1 ] = sample { t : exp1 [ len ( exp1 ) - 1 ] . T ( ) , h : lh }
}
// This histogram with a new bucket at the end causes the re-encoding of the previous histogram.
// Hence the previous histogram is recoded into this new layout.
// But the query returns the histogram from the in-memory buffer, hence we don't see the recode here yet.
h . PositiveSpans [ 1 ] . Length ++
h . PositiveBuckets = append ( h . PositiveBuckets , 1 )
h . Count += 3
appendHistogram ( series1 , 104 , h , & exp1 , histogram . NotCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
// Because of the previous two histograms being on the active chunk,
// and the next append is only adding a new bucket, the active chunk
// will be re-encoded to the new layout.
if floatHistogram {
lh := exp1 [ len ( exp1 ) - 2 ] . FH ( ) . Copy ( )
lh . PositiveSpans [ 0 ] . Length ++
lh . PositiveSpans [ 1 ] . Offset --
lh . PositiveBuckets = [ ] float64 { 2 , 3 , 0 , 2 , 2 , 0 }
exp1 [ len ( exp1 ) - 2 ] = sample { t : exp1 [ len ( exp1 ) - 2 ] . T ( ) , fh : lh }
lh = exp1 [ len ( exp1 ) - 1 ] . FH ( ) . Copy ( )
lh . PositiveSpans [ 0 ] . Length ++
lh . PositiveSpans [ 1 ] . Offset --
lh . PositiveBuckets = [ ] float64 { 2 , 3 , 0 , 2 , 2 , 3 }
exp1 [ len ( exp1 ) - 1 ] = sample { t : exp1 [ len ( exp1 ) - 1 ] . T ( ) , fh : lh }
} else {
lh := exp1 [ len ( exp1 ) - 2 ] . H ( ) . Copy ( )
lh . PositiveSpans [ 0 ] . Length ++
lh . PositiveSpans [ 1 ] . Offset --
lh . PositiveBuckets = [ ] int64 { 2 , 1 , - 3 , 2 , 0 , - 2 }
exp1 [ len ( exp1 ) - 2 ] = sample { t : exp1 [ len ( exp1 ) - 2 ] . T ( ) , h : lh }
lh = exp1 [ len ( exp1 ) - 1 ] . H ( ) . Copy ( )
lh . PositiveSpans [ 0 ] . Length ++
lh . PositiveSpans [ 1 ] . Offset --
lh . PositiveBuckets = [ ] int64 { 2 , 1 , - 3 , 2 , 0 , 1 }
exp1 [ len ( exp1 ) - 1 ] = sample { t : exp1 [ len ( exp1 ) - 1 ] . T ( ) , h : lh }
}
// Now we add the new bucket in between. The empty bucket is again not present in the old histogram.
h . PositiveSpans [ 0 ] . Length ++
h . PositiveSpans [ 1 ] . Offset --
h . Count += 3
// {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1}
h . PositiveBuckets = append ( h . PositiveBuckets [ : 2 ] , append ( [ ] int64 { 0 } , h . PositiveBuckets [ 2 : ] ... ) ... )
appendHistogram ( series1 , 105 , h , & exp1 , histogram . NotCounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
// We add 4 more histograms to clear out the buffer and see the re-encoded histograms.
appendHistogram ( series1 , 106 , h , & exp1 , histogram . NotCounterReset )
appendHistogram ( series1 , 107 , h , & exp1 , histogram . NotCounterReset )
appendHistogram ( series1 , 108 , h , & exp1 , histogram . NotCounterReset )
appendHistogram ( series1 , 109 , h , & exp1 , histogram . NotCounterReset )
// Update the expected histograms to reflect the re-encoding.
if floatHistogram {
l := len ( exp1 )
h7 := exp1 [ l - 7 ] . FH ( )
h7 . PositiveSpans = exp1 [ l - 1 ] . FH ( ) . PositiveSpans
h7 . PositiveBuckets = [ ] float64 { 2 , 3 , 0 , 2 , 2 , 0 }
exp1 [ l - 7 ] = sample { t : exp1 [ l - 7 ] . T ( ) , fh : h7 }
h6 := exp1 [ l - 6 ] . FH ( )
h6 . PositiveSpans = exp1 [ l - 1 ] . FH ( ) . PositiveSpans
h6 . PositiveBuckets = [ ] float64 { 2 , 3 , 0 , 2 , 2 , 3 }
exp1 [ l - 6 ] = sample { t : exp1 [ l - 6 ] . T ( ) , fh : h6 }
} else {
l := len ( exp1 )
h7 := exp1 [ l - 7 ] . H ( )
h7 . PositiveSpans = exp1 [ l - 1 ] . H ( ) . PositiveSpans
h7 . PositiveBuckets = [ ] int64 { 2 , 1 , - 3 , 2 , 0 , - 2 } // -3 and -2 are the empty buckets.
exp1 [ l - 7 ] = sample { t : exp1 [ l - 7 ] . T ( ) , h : h7 }
h6 := exp1 [ l - 6 ] . H ( )
h6 . PositiveSpans = exp1 [ l - 1 ] . H ( ) . PositiveSpans
h6 . PositiveBuckets = [ ] int64 { 2 , 1 , - 3 , 2 , 0 , 1 } // -3 is the empty bucket.
exp1 [ l - 6 ] = sample { t : exp1 [ l - 6 ] . T ( ) , h : h6 }
}
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
} )
t . Run ( "buckets disappearing" , func ( t * testing . T ) {
h . PositiveSpans [ 1 ] . Length --
h . PositiveBuckets = h . PositiveBuckets [ : len ( h . PositiveBuckets ) - 1 ]
h . Count -= 3
appendHistogram ( series1 , 110 , h , & exp1 , histogram . CounterReset )
testQuery ( "foo" , "bar1" , map [ string ] [ ] chunks . Sample { series1 . String ( ) : exp1 } )
} )
} )
t . Run ( "series starting with float and then getting histograms" , func ( t * testing . T ) {
appendFloat ( series2 , 100 , 100 , & exp2 )
appendFloat ( series2 , 101 , 101 , & exp2 )
appendFloat ( series2 , 102 , 102 , & exp2 )
testQuery ( "foo" , "bar2" , map [ string ] [ ] chunks . Sample { series2 . String ( ) : exp2 } )
h := baseH . Copy ( )
appendHistogram ( series2 , 103 , h , & exp2 , histogram . UnknownCounterReset )
appendHistogram ( series2 , 104 , h , & exp2 , histogram . NotCounterReset )
appendHistogram ( series2 , 105 , h , & exp2 , histogram . NotCounterReset )
testQuery ( "foo" , "bar2" , map [ string ] [ ] chunks . Sample { series2 . String ( ) : exp2 } )
// Switching between float and histograms again.
appendFloat ( series2 , 106 , 106 , & exp2 )
appendFloat ( series2 , 107 , 107 , & exp2 )
testQuery ( "foo" , "bar2" , map [ string ] [ ] chunks . Sample { series2 . String ( ) : exp2 } )
appendHistogram ( series2 , 108 , h , & exp2 , histogram . UnknownCounterReset )
appendHistogram ( series2 , 109 , h , & exp2 , histogram . NotCounterReset )
testQuery ( "foo" , "bar2" , map [ string ] [ ] chunks . Sample { series2 . String ( ) : exp2 } )
} )
t . Run ( "series starting with histogram and then getting float" , func ( t * testing . T ) {
h := baseH . Copy ( )
appendHistogram ( series3 , 101 , h , & exp3 , histogram . UnknownCounterReset )
appendHistogram ( series3 , 102 , h , & exp3 , histogram . NotCounterReset )
appendHistogram ( series3 , 103 , h , & exp3 , histogram . NotCounterReset )
testQuery ( "foo" , "bar3" , map [ string ] [ ] chunks . Sample { series3 . String ( ) : exp3 } )
appendFloat ( series3 , 104 , 100 , & exp3 )
appendFloat ( series3 , 105 , 101 , & exp3 )
appendFloat ( series3 , 106 , 102 , & exp3 )
testQuery ( "foo" , "bar3" , map [ string ] [ ] chunks . Sample { series3 . String ( ) : exp3 } )
// Switching between histogram and float again.
appendHistogram ( series3 , 107 , h , & exp3 , histogram . UnknownCounterReset )
appendHistogram ( series3 , 108 , h , & exp3 , histogram . NotCounterReset )
testQuery ( "foo" , "bar3" , map [ string ] [ ] chunks . Sample { series3 . String ( ) : exp3 } )
appendFloat ( series3 , 109 , 106 , & exp3 )
appendFloat ( series3 , 110 , 107 , & exp3 )
testQuery ( "foo" , "bar3" , map [ string ] [ ] chunks . Sample { series3 . String ( ) : exp3 } )
} )
t . Run ( "query mix of histogram and float series" , func ( t * testing . T ) {
// A float only series.
appendFloat ( series4 , 100 , 100 , & exp4 )
appendFloat ( series4 , 101 , 101 , & exp4 )
appendFloat ( series4 , 102 , 102 , & exp4 )
testQuery ( "foo" , "bar.*" , map [ string ] [ ] chunks . Sample {
series1 . String ( ) : exp1 ,
series2 . String ( ) : exp2 ,
series3 . String ( ) : exp3 ,
series4 . String ( ) : exp4 ,
} )
} )
}
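// TestQueryHistogramFromBlocksWithCompaction creates persistent blocks from
// series containing histograms and verifies that querying them, including
// after compaction of overlapping blocks, returns the expected samples.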
func TestQueryHistogramFromBlocksWithCompaction ( t * testing . T ) {
minute := func ( m int ) int64 { return int64 ( m ) * time . Minute . Milliseconds ( ) }
testBlockQuerying := func ( t * testing . T , blockSeries ... [ ] storage . Series ) {
t . Helper ( )
opts := DefaultOptions ( )
db := openTestDB ( t , opts , nil )
t . Cleanup ( func ( ) {
require . NoError ( t , db . Close ( ) )
} )
var it chunkenc . Iterator
exp := make ( map [ string ] [ ] chunks . Sample )
for _ , series := range blockSeries {
createBlock ( t , db . Dir ( ) , series )
for _ , s := range series {
key := s . Labels ( ) . String ( )
it = s . Iterator ( it )
slice := exp [ key ]
for typ := it . Next ( ) ; typ != chunkenc . ValNone ; typ = it . Next ( ) {
switch typ {
case chunkenc . ValFloat :
ts , v := it . At ( )
slice = append ( slice , sample { t : ts , f : v } )
case chunkenc . ValHistogram :
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
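A minimal sketch of what "natural" ordering means here, assuming a hand-rolled comparator (the merged code may differ): digit runs compare numerically, everything else byte-wise, so "a2" sorts before "a10".
	// naturalLess reports whether a sorts before b in natural order.
	func naturalLess(a, b string) bool {
		for len(a) > 0 && len(b) > 0 {
			if isDigit(a[0]) && isDigit(b[0]) {
				na, restA := takeNum(a)
				nb, restB := takeNum(b)
				if na != nb {
					return na < nb
				}
				a, b = restA, restB
				continue
			}
			if a[0] != b[0] {
				return a[0] < b[0]
			}
			a, b = a[1:], b[1:]
		}
		return len(a) < len(b)
	}

	func isDigit(c byte) bool { return '0' <= c && c <= '9' }

	// takeNum consumes a leading digit run; overflow is ignored in this sketch.
	func takeNum(s string) (n int, rest string) {
		i := 0
		for i < len(s) && isDigit(s[i]) {
			n = n*10 + int(s[i]-'0')
			i++
		}
		return n, s[i:]
	}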
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improve readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
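The shortcut, sketched with hypothetical names against the public storage API (the merged code lives inside the fanout type itself): return the primary querier directly when there is nothing to merge it with.
	// querierFor avoids the merge layer, and therefore the Select() sort,
	// when no remote storage is configured.
	func querierFor(primary storage.Querier, secondaries []storage.Querier) storage.Querier {
		if len(secondaries) == 0 {
			return primary // nothing to merge: skip the sort
		}
		return storage.NewMergeQuerier([]storage.Querier{primary}, secondaries, storage.ChainedSeriesMerge)
	}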
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
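The shortcut sketched with a hypothetical helper: when every matcher targets the requested label, its values can be filtered directly, with no postings lookup at all.
	// filterOwnValues assumes every matcher in ms has m.Name equal to the
	// requested label name, so each value can be tested directly.
	func filterOwnValues(values []string, ms []*labels.Matcher) []string {
		out := make([]string, 0, len(values))
		for _, v := range values {
			ok := true
			for _, m := range ms {
				if !m.Matches(v) {
					ok = false
					break
				}
			}
			if ok {
				out = append(out, v)
			}
		}
		return out
	}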
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
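A minimal sketch of the mechanism using only runtime/debug.SetMemoryLimit from the standard library; reading a cgroup v2 limit file is an assumption here, as the actual change detects limits via a library and exposes the reserved ratio as a flag.
	// applyGoMemLimit sets GOMEMLIMIT to the detected container limit,
	// keeping a reserved fraction of memory outside the Go heap target.
	func applyGoMemLimit(reservedRatio float64) {
		raw, err := os.ReadFile("/sys/fs/cgroup/memory.max") // assumption: cgroup v2
		if err != nil {
			return // no detectable limit: leave GOMEMLIMIT alone
		}
		s := strings.TrimSpace(string(raw))
		if s == "max" {
			return // unlimited cgroup
		}
		limit, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return
		}
		debug.SetMemoryLimit(int64(float64(limit) * (1 - reservedRatio)))
	}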
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
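The resulting calling convention, using the chunkenc and histogram packages already imported in this file (the process callback is a hypothetical consumer): pass the previous object back in and it is overwritten instead of reallocated.
	// consumeHistograms reuses one histogram object across the iteration:
	// h is nil on the first AtHistogram call (one allocation), then recycled.
	func consumeHistograms(it chunkenc.Iterator, process func(int64, *histogram.Histogram)) {
		var h *histogram.Histogram
		for it.Next() == chunkenc.ValHistogram {
			var ts int64
			ts, h = it.AtHistogram(h)
			process(ts, h)
		}
	}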
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicit sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
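A conceptual sketch of the fast path on the classic sorted-slice representation (the exported entry point is labels.Labels.DropMetricName; the stringlabels variant re-points into a flat byte string instead):
	// dropMetricName removes __name__; since labels are sorted and
	// "__name__" usually sorts first, the common case is a free re-slice.
	func dropMetricName(ls []labels.Label) []labels.Label {
		if len(ls) > 0 && ls[0].Name == labels.MetricName {
			return ls[1:] // same backing array, no copy
		}
		out := make([]labels.Label, 0, len(ls))
		for _, l := range ls {
			if l.Name != labels.MetricName {
				out = append(out, l)
			}
		}
		return out
	}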
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
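The shape of the change, with illustrative fields rather than the real memSeries layout:
	type memSeriesSketch struct {
		mtx  sync.Mutex // was sync.RWMutex: 8 bytes instead of 24, per series
		lset labels.Labels
	}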
* Fix last_over_time for native histograms
The last_over_time function retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
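The proposed fix, sketched against promql's HPoint type (illustrative, not the exact merged diff): detach the retained sample from the iterator's reusable object by copying it.
	// lastPoint returns the final HPoint with its histogram copied, so the
	// buffered iterator may overwrite its own object afterwards.
	func lastPoint(points []promql.HPoint) promql.HPoint {
		last := points[len(points)-1]
		if last.H != nil {
			last.H = last.H.Copy()
		}
		return last
	}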
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update the concurrency controller once the rule evaluation is done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption when opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption when opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because it was built before stringlabels was a thing).
Follow-up work
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
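How a stable hash serves query sharding, in one line (shardCount is a hypothetical parameter): the shard a series lands in must not change when Prometheus swaps its internal labels.Hash implementation.
	func shardOf(lset labels.Labels, shardCount uint64) uint64 {
		return labels.StableHash(lset) % shardCount // stable across versions
	}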
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing points slice from previous series when the slice is under utilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
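The reuse idea in isolation, assuming promql.FPoint and a hypothetical helper (the merged heuristic for "under utilized" may differ):
	// reusePoints keeps the previous series' backing array when it is big
	// enough, instead of allocating a fresh slice per series.
	func reusePoints(prev []promql.FPoint, need int) []promql.FPoint {
		if cap(prev) >= need {
			return prev[:0]
		}
		return make([]promql.FPoint, 0, need)
	}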
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
ts, h := it.AtHistogram(nil)
2022-08-29 03:05:03 -07:00
slice = append(slice, sample{t: ts, h: h})
2022-12-28 00:55:07 -08:00
case chunkenc.ValFloatHistogram:
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improvise readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicitly sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby proposose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Fix last_over_time for native histograms
The last_over_time retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update concurrency controller once the rule evaluation has done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer an hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because built before stringlabels was a thing).
Follow up work
As mentioned in #10420, if this PR is accepted I'm also open to upload another foundamental piece used by Grafana Mimir query sharding to accelerate the query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
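For illustration, the stable-hash idea sketched under the assumption that it matches the pre-stringlabels labels hash (xxhash over sorted name/value pairs with a 0xff separator; assumes github.com/cespare/xxhash/v2):
// stableHash sketches a version-stable hash over sorted label pairs, so a
// series maps to the same query shard across Prometheus releases.
const sep = '\xff' // Cannot appear in valid label names or values.

func stableHash(pairs [][2]string) uint64 {
	b := make([]byte, 0, 1024)
	for _, p := range pairs {
		b = append(b, p[0]...)
		b = append(b, sep)
		b = append(b, p[1]...)
		b = append(b, sep)
	}
	return xxhash.Sum64(b)
}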
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; the other was doing work, but that work didn't
scale linearly with the iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
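For context, the property those benchmarks lacked is that the measured work must grow with b.N; a minimal well-formed benchmark looks like this (using hash/crc32 purely as an example workload):
// BenchmarkChecksum does b.N units of work, so the reported ns/op is meaningful.
func BenchmarkChecksum(b *testing.B) {
	data := make([]byte, 4096)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = crc32.ChecksumIEEE(data)
	}
}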
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods, because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
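The shape of the fix, sketched with client-go's informer API (simplified; enqueueEndpointsForPod is a hypothetical helper, and recent client-go returns a registration and an error from AddEventHandler):
// Also react to Pod updates, since a Pod can change (e.g. phase Pending ->
// Running) without any event firing on the related Endpoints object.
_, _ = podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	UpdateFunc: func(oldObj, newObj interface{}) {
		enqueueEndpointsForPod(newObj) // Hypothetical: requeue affected Endpoints.
	},
})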
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing the points slice from the previous series when the slice is underutilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
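The reuse heuristic, sketched (the threshold is illustrative, not the PR's exact value): hold on to the previous series' slice only if enough of its capacity was actually used, so an oversized backing array is not pinned forever.
// reuseOrDiscard returns prev reset for reuse when it was reasonably
// utilized, and nil otherwise so the next series allocates a fresh slice.
func reuseOrDiscard(prev []float64) []float64 {
	const minUtilization = 0.25 // Illustrative cut-off.
	if cap(prev) == 0 || float64(len(prev))/float64(cap(prev)) < minUtilization {
		return nil
	}
	return prev[:0] // Reuse the backing array for the next series.
}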
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
						ts, h := it.AtFloatHistogram(nil)
2022-12-28 00:55:07 -08:00
						slice = append(slice, sample{t: ts, fh: h})
					default:
						t.Fatalf("unexpected sample value type %d", typ)
2022-08-29 03:05:03 -07:00
					}
				}
				sort.Slice(slice, func(i, j int) bool {
					return slice[i].T() < slice[j].T()
				})
				exp[key] = slice
			}
		}
2023-12-07 03:35:01 -08:00
		require.Empty(t, db.Blocks())
2022-08-29 03:05:03 -07:00
		require.NoError(t, db.reload())
		require.Len(t, db.Blocks(), len(blockSeries))
2023-09-12 03:37:38 -07:00
		q, err := db.Querier(math.MinInt64, math.MaxInt64)
2022-08-29 03:05:03 -07:00
		require.NoError(t, err)
		res := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
2023-01-18 08:59:29 -08:00
		compareSeries(t, exp, res)
2022-10-12 01:01:12 -07:00
		// Compact all the blocks together and query again.
		blocks := db.Blocks()
		blockDirs := make([]string, 0, len(blocks))
		for _, b := range blocks {
			blockDirs = append(blockDirs, b.Dir())
		}
		id, err := db.compactor.Compact(db.Dir(), blockDirs, blocks)
		require.NoError(t, err)
		require.NotEqual(t, ulid.ULID{}, id)
		require.NoError(t, db.reload())
		require.Len(t, db.Blocks(), 1)
2023-09-12 03:37:38 -07:00
		q, err = db.Querier(math.MinInt64, math.MaxInt64)
2022-10-12 01:01:12 -07:00
		require.NoError(t, err)
		res = query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
2023-01-18 08:59:29 -08:00
		// After compaction, the expected samples no longer carry "unknown"
		// counter reset hints that originated from different overlapping chunks.
		for _, ss := range exp {
			for i, s := range ss[1:] {
				if s.H() != nil && ss[i].H() != nil && s.H().CounterResetHint == histogram.UnknownCounterReset {
					s.H().CounterResetHint = histogram.NotCounterReset
				}
				if s.FH() != nil && ss[i].FH() != nil && s.FH().CounterResetHint == histogram.UnknownCounterReset {
					s.FH().CounterResetHint = histogram.NotCounterReset
				}
			}
		}
		compareSeries(t, exp, res)
2022-08-29 03:05:03 -07:00
	}
2023-01-18 08:59:29 -08:00
	for _, floatHistogram := range []bool{false, true} {
2022-12-28 00:55:07 -08:00
		t.Run(fmt.Sprintf("floatHistogram=%t", floatHistogram), func(t *testing.T) {
			t.Run("serial blocks with only histograms", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramSeries(10, 5, minute(0), minute(119), minute(1), floatHistogram),
					genHistogramSeries(10, 5, minute(120), minute(239), minute(1), floatHistogram),
					genHistogramSeries(10, 5, minute(240), minute(359), minute(1), floatHistogram),
				)
			})
2022-08-29 03:05:03 -07:00
2022-12-28 00:55:07 -08:00
			t.Run("serial blocks with either histograms or floats in a block and not both", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramSeries(10, 5, minute(0), minute(119), minute(1), floatHistogram),
2023-08-24 06:21:17 -07:00
					genSeriesFromSampleGenerator(10, 5, minute(120), minute(239), minute(1), func(ts int64) chunks.Sample {
2023-03-30 10:50:13 -07:00
						return sample{t: ts, f: rand.Float64()}
2022-12-28 00:55:07 -08:00
					}),
					genHistogramSeries(10, 5, minute(240), minute(359), minute(1), floatHistogram),
				)
			})
2022-08-29 03:05:03 -07:00
2022-12-28 00:55:07 -08:00
			t.Run("serial blocks with mix of histograms and float64", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(1), floatHistogram),
					genHistogramSeries(10, 5, minute(61), minute(120), minute(1), floatHistogram),
					genHistogramAndFloatSeries(10, 5, minute(121), minute(180), minute(1), floatHistogram),
2023-08-24 06:21:17 -07:00
					genSeriesFromSampleGenerator(10, 5, minute(181), minute(240), minute(1), func(ts int64) chunks.Sample {
2023-03-30 10:50:13 -07:00
						return sample{t: ts, f: rand.Float64()}
2022-12-28 00:55:07 -08:00
					}),
				)
			})
2022-10-12 01:01:12 -07:00
2022-12-28 00:55:07 -08:00
			t.Run("overlapping blocks with only histograms", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramSeries(10, 5, minute(0), minute(120), minute(3), floatHistogram),
					genHistogramSeries(10, 5, minute(1), minute(120), minute(3), floatHistogram),
					genHistogramSeries(10, 5, minute(2), minute(120), minute(3), floatHistogram),
				)
			})
2022-10-12 01:01:12 -07:00
2022-12-28 00:55:07 -08:00
			t.Run("overlapping blocks with only histograms and only float in a series", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramSeries(10, 5, minute(0), minute(120), minute(3), floatHistogram),
2023-08-24 06:21:17 -07:00
					genSeriesFromSampleGenerator(10, 5, minute(1), minute(120), minute(3), func(ts int64) chunks.Sample {
2023-03-30 10:50:13 -07:00
						return sample{t: ts, f: rand.Float64()}
2022-12-28 00:55:07 -08:00
					}),
					genHistogramSeries(10, 5, minute(2), minute(120), minute(3), floatHistogram),
				)
			})
2022-08-29 03:05:03 -07:00
2022-12-28 00:55:07 -08:00
			t.Run("overlapping blocks with mix of histograms and float64", func(t *testing.T) {
				testBlockQuerying(t,
					genHistogramAndFloatSeries(10, 5, minute(0), minute(60), minute(3), floatHistogram),
					genHistogramSeries(10, 5, minute(46), minute(100), minute(3), floatHistogram),
					genHistogramAndFloatSeries(10, 5, minute(89), minute(140), minute(3), floatHistogram),
2023-08-24 06:21:17 -07:00
					genSeriesFromSampleGenerator(10, 5, minute(126), minute(200), minute(3), func(ts int64) chunks.Sample {
2023-03-30 10:50:13 -07:00
						return sample{t: ts, f: rand.Float64()}
2022-12-28 00:55:07 -08:00
					}),
				)
			})
		})
	}
2022-08-29 03:05:03 -07:00
}
2022-09-14 05:08:34 -07:00
func TestNativeHistogramFlag(t *testing.T) {
	dir := t.TempDir()
	db, err := Open(dir, nil, nil, nil, nil)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})
	h := &histogram.Histogram{
2023-10-13 00:58:26 -07:00
		Count: 9,
2022-09-14 05:08:34 -07:00
		ZeroCount:     4,
		ZeroThreshold: 0.001,
		Sum:           35.5,
		Schema:        1,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 2, Length: 2},
		},
		PositiveBuckets: []int64{1, 1, -1, 0},
	}
	l := labels.FromStrings("foo", "bar")
	app := db.Appender(context.Background())
	// Disabled by default.
2022-12-28 00:55:07 -08:00
	_, err = app.AppendHistogram(0, l, 100, h, nil)
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
2023-11-29 06:15:57 -08:00
	_, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat(nil))
2022-09-14 05:08:34 -07:00
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
	// Enable and append.
	db.EnableNativeHistograms()
2022-12-28 00:55:07 -08:00
	_, err = app.AppendHistogram(0, l, 200, h, nil)
	require.NoError(t, err)
2023-11-29 06:15:57 -08:00
	_, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat(nil))
2022-09-14 05:08:34 -07:00
	require.NoError(t, err)
	db.DisableNativeHistograms()
2022-12-28 00:55:07 -08:00
	_, err = app.AppendHistogram(0, l, 300, h, nil)
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
2023-11-29 06:15:57 -08:00
	_, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat(nil))
2022-09-14 05:08:34 -07:00
	require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
	require.NoError(t, app.Commit())
2023-09-12 03:37:38 -07:00
	q, err := db.Querier(math.MinInt64, math.MaxInt64)
2022-09-14 05:08:34 -07:00
	require.NoError(t, err)
	act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
2023-08-24 06:21:17 -07:00
	require.Equal(t, map[string][]chunks.Sample{
2023-11-29 06:15:57 -08:00
		l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat(nil)}},
2022-12-28 00:55:07 -08:00
	}, act)
2022-09-14 05:08:34 -07:00
}
2023-01-18 08:59:29 -08:00
// compareSeries essentially replaces `require.Equal(t, expected, actual)` in
// situations where the actual series might contain more counter reset hints
// "unknown" than the expected series. This can easily happen for long series
// that trigger new chunks. This function therefore tolerates counter reset
// hints "CounterReset" and "NotCounterReset" in an expected series where the
// actual series contains a counter reset hint "UnknownCounterReset".
// "GaugeType" hints are still strictly checked, and any "UnknownCounterReset"
// in an expected series has to be matched precisely by the actual series.
2023-08-24 06:21:17 -07:00
func compareSeries(t require.TestingT, expected, actual map[string][]chunks.Sample) {
2023-01-18 08:59:29 -08:00
	if len(expected) != len(actual) {
		// The reason for the difference is not the counter reset hints
		// (alone), so let's use the pretty diffing by the require
		// package.
		require.Equal(t, expected, actual, "number of series differs")
	}
	for key, eSamples := range expected {
		aSamples, ok := actual[key]
		if !ok {
			require.Equal(t, expected, actual, "expected series %q not found", key)
		}
		if len(eSamples) != len(aSamples) {
			require.Equal(t, eSamples, aSamples, "number of samples for series %q differs", key)
		}
		for i, eS := range eSamples {
			aS := aSamples[i]
			aH, eH := aS.H(), eS.H()
			aFH, eFH := aS.FH(), eS.FH()
			switch {
			case aH != nil && eH != nil && aH.CounterResetHint == histogram.UnknownCounterReset && eH.CounterResetHint != histogram.GaugeType:
				eH = eH.Copy()
				eH.CounterResetHint = histogram.UnknownCounterReset
				eS = sample{t: eS.T(), h: eH}
			case aFH != nil && eFH != nil && aFH.CounterResetHint == histogram.UnknownCounterReset && eFH.CounterResetHint != histogram.GaugeType:
				eFH = eFH.Copy()
				eFH.CounterResetHint = histogram.UnknownCounterReset
				eS = sample{t: eS.T(), fh: eFH}
			}
			require.Equal(t, eS, aS, "sample %d in series %q differs", i, key)
		}
	}
}
2023-02-19 09:34:51 -08:00
// TestChunkQuerierReadWriteRace looks for any possible race between appending
// samples and reading chunks: the head chunk that is being appended to can be
// read in parallel, and we should be able to make a copy of the chunk without
// worrying about the concurrent write.
func TestChunkQuerierReadWriteRace(t *testing.T) {
	db := openTestDB(t, nil, nil)
	defer func() {
		require.NoError(t, db.Close())
	}()

	lbls := labels.FromStrings("foo", "bar")

	writer := func() error {
		<-time.After(5 * time.Millisecond) // Initial pause while readers start.
		ts := 0
		for i := 0; i < 500; i++ {
			app := db.Appender(context.Background())
			for j := 0; j < 10; j++ {
				ts++
				_, err := app.Append(0, lbls, int64(ts), float64(ts*100))
				if err != nil {
					return err
				}
			}
			err := app.Commit()
			if err != nil {
				return err
			}
			<-time.After(time.Millisecond)
		}
		return nil
	}

	reader := func() {
2023-09-12 03:37:38 -07:00
		querier, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
2023-02-19 09:34:51 -08:00
		require.NoError(t, err)
		defer func(q storage.ChunkQuerier) {
			require.NoError(t, q.Close())
		}(querier)
2023-09-12 03:37:38 -07:00
		ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
2023-02-19 09:34:51 -08:00
		for ss.Next() {
			cs := ss.At()
			it := cs.Iterator(nil)
			for it.Next() {
				m := it.At()
				b := m.Chunk.Bytes()
				bb := make([]byte, len(b))
				copy(bb, b) // This copying of chunk bytes detects any race.
			}
		}
		require.NoError(t, ss.Err())
	}

	ch := make(chan struct{})
	var writerErr error
	go func() {
		defer close(ch)
		writerErr = writer()
	}()

Outer:
	for {
		reader()
		select {
		case <-ch:
			break Outer
		default:
		}
	}

	require.NoError(t, writerErr)
}