chore: Fix typos (#14868)

* Fix typos

---------

Signed-off-by: Nathan Baulch <nathan.baulch@gmail.com>
Nathan Baulch 2024-09-11 06:32:03 +10:00 committed by GitHub
parent 54772a2c8f
commit 50cd453c8f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 98 additions and 98 deletions

@@ -140,7 +140,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@@ -677,7 +677,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2
 ## 2.33.0 / 2022-01-29
-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@@ -914,7 +914,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@@ -1840,7 +1840,7 @@ information, read the announcement blog post and migration guide.
 ## 1.7.0 / 2017-06-06
 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.

@@ -115,7 +115,7 @@ The Makefile provides several targets:
 Prometheus is bundled with many service discovery plugins.
 When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
 import path that will be built into the Prometheus binary.
 After you have changed the file, you

@@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
 fmt.Fprintf(tw,
 "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
 meta.ULID,
-getFormatedTime(meta.MinTime, humanReadable),
-getFormatedTime(meta.MaxTime, humanReadable),
+getFormattedTime(meta.MinTime, humanReadable),
+getFormattedTime(meta.MaxTime, humanReadable),
 time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
 meta.Stats.NumSamples,
 meta.Stats.NumChunks,
 meta.Stats.NumSeries,
-getFormatedBytes(b.Size(), humanReadable),
+getFormattedBytes(b.Size(), humanReadable),
 )
 }
 }
-func getFormatedTime(timestamp int64, humanReadable bool) string {
+func getFormattedTime(timestamp int64, humanReadable bool) string {
 if humanReadable {
 return time.Unix(timestamp/1000, 0).UTC().String()
 }
 return strconv.FormatInt(timestamp, 10)
 }
-func getFormatedBytes(bytes int64, humanReadable bool) string {
+func getFormattedBytes(bytes int64, humanReadable bool) string {
 if humanReadable {
 return units.Base2Bytes(bytes).String()
 }

@@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
 if len(query) == 2 {
 h := sha1.New()
 h.Write([]byte(query[1]))
-// Avoing long filenames for Windows.
+// Avoiding long filenames for Windows.
 f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
 }
 }

@@ -41,10 +41,10 @@ const (
 uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_"
 uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname"
 uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn"
-uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id"
-uyuniLablelGroups = uyuniMetaLabelPrefix + "groups"
-uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
-uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter"
+uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id"
+uyuniLabelGroups = uyuniMetaLabelPrefix + "groups"
+uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
+uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter"
 uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module"
 uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path"
 uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme"
@@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels(
 model.AddressLabel: model.LabelValue(addr),
 uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
 uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
-uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
-uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
-uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
-uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),
+uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
+uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
+uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName),
+uyuniLabelExporter: model.LabelValue(endpoint.ExporterName),
 uyuniLabelProxyModule: model.LabelValue(endpoint.Module),
 uyuniLabelMetricsPath: model.LabelValue(endpoint.Path),
 uyuniLabelScheme: model.LabelValue(scheme),

@@ -1407,7 +1407,7 @@ authorization:
 # `credentials_file`.
 [ credentials: <secret> ]
 # Sets the credentials to the credentials read from the configured file.
-# It is mutuall exclusive with `credentials`.
+# It is mutually exclusive with `credentials`.
 [ credentials_file: <filename> ]
 # Optional OAuth 2.0 configuration, currently not supported by AWS.
@@ -2627,7 +2627,7 @@ authorization:
 # `credentials_file`.
 [ credentials: <secret> ]
 # Sets the credentials to the credentials read from the configured file.
-# It is mutuall exclusive with `credentials`.
+# It is mutually exclusive with `credentials`.
 [ credentials_file: <filename> ]
 # Optional OAuth 2.0 configuration, currently not supported by AWS.
@@ -3988,7 +3988,7 @@ azuread:
 # Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
 # To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
 google_iam:
-# Service account key with monitoring write permessions.
+# Service account key with monitoring write permissions.
 credentials_file: <file_name>
 # Configures the remote write request's TLS settings.

@@ -144,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora
 for Prometheus becomes full.
 At present, we recommend setting the retention size to, at most, 80-85% of your
-allocated Prometheus disk space. This increases the likelihood that older entires
+allocated Prometheus disk space. This increases the likelihood that older entries
 will be removed prior to hitting any disk limitations.
 ## Remote storage integrations

@@ -1,4 +1,4 @@
-# An example scrape configuration for running Prometheus with Ovhcloud.
+# An example scrape configuration for running Prometheus with OVHcloud.
 scrape_configs:
 - job_name: 'ovhcloud'
 ovhcloud_sd_configs:

@@ -342,7 +342,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
 default:
 // All other cases shouldn't actually happen.
 // They are a direct collision of CounterReset and NotCounterReset.
-// Conservatively set the CounterResetHint to "unknown" and isse a warning.
+// Conservatively set the CounterResetHint to "unknown" and issue a warning.
 h.CounterResetHint = UnknownCounterReset
 // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
 }
@@ -658,7 +658,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool {
 if !currIt.Next() {
 // Reached end of currIt early, therefore
 // previous histogram has a bucket that the
-// current one does not have. Unlass all
+// current one does not have. Unless all
 // remaining buckets in the previous histogram
 // are unpopulated, this is a reset.
 for {
@@ -891,7 +891,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
 // reconcileZeroBuckets finds a zero bucket large enough to include the zero
 // buckets of both histograms (the receiving histogram and the other histogram)
 // with a zero threshold that is not within a populated bucket in either
-// histogram. This method modifies the receiving histogram accourdingly, but
+// histogram. This method modifies the receiving histogram accordingly, but
 // leaves the other histogram as is. Instead, it returns the zero count the
 // other histogram would have if it were modified.
 func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {

@@ -140,7 +140,7 @@ testmetric{label="\"bar\""} 1`
 v: 8.3835e-05,
 lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
 }, {
-// NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed.
+// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
 m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
 v: 8.3835e-05,
 lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),

@@ -604,7 +604,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
 return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
 }
-// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
 // but appends ".0" if the resulting number would otherwise contain neither a
 // "." nor an "e".
 func formatOpenMetricsFloat(f float64) string {
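
For context: the comment fixed above describes a small formatting rule, and a minimal, hypothetical Go sketch of that behaviour might look like the following. The helper name is invented, and the real formatOpenMetricsFloat in Prometheus may differ in detail.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatFloatWithDotZero formats a float the usual Go way and appends ".0"
// when the result contains neither a "." nor an "e", so integral values
// still read as floats (e.g. "42.0" instead of "42").
func formatFloatWithDotZero(f float64) string {
	s := strconv.FormatFloat(f, 'g', -1, 64)
	if strings.ContainsAny(s, ".e") {
		return s
	}
	return s + ".0"
}

func main() {
	fmt.Println(formatFloatWithDotZero(42))   // 42.0
	fmt.Println(formatFloatWithDotZero(0.5))  // 0.5
	fmt.Println(formatFloatWithDotZero(1e21)) // 1e+21
}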

@@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) {
 // Initialize the discovery manager
 // This is relevant as the updates aren't sent continually in real life, but only each updatert.
-// The old implementation of TestHangingNotifier didn't take that into acount.
+// The old implementation of TestHangingNotifier didn't take that into account.
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 reg := prometheus.NewRegistry()

@@ -1467,7 +1467,7 @@ load 10s
 },
 {
 // Nested subquery.
-// Now the outmost subquery produces more samples than inner most rate.
+// Now the outermost subquery produces more samples than inner most rate.
 Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
 MaxSamples: 36,
 Start: time.Unix(10, 0),

@@ -31,7 +31,7 @@ import (
 )
 type ActiveQueryTracker struct {
-mmapedFile []byte
+mmappedFile []byte
 getNextIndex chan int
 logger log.Logger
 closer io.Closer
@@ -87,24 +87,24 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
 }
 }
-type mmapedFile struct {
+type mmappedFile struct {
 f io.Closer
 m mmap.MMap
 }
-func (f *mmapedFile) Close() error {
+func (f *mmappedFile) Close() error {
 err := f.m.Unmap()
 if err != nil {
-err = fmt.Errorf("mmapedFile: unmapping: %w", err)
+err = fmt.Errorf("mmappedFile: unmapping: %w", err)
 }
 if fErr := f.f.Close(); fErr != nil {
-return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
+return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err)
 }
 return err
 }
-func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
+func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
 file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
 if err != nil {
 absPath, pathErr := filepath.Abs(filename)
@@ -129,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
 return nil, nil, err
 }
-return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
+return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
 }
 func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@@ -141,14 +141,14 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo
 filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
 logUnfinishedQueries(filename, filesize, logger)
-fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger)
+fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger)
 if err != nil {
 panic("Unable to create mmap-ed active query log")
 }
 copy(fileAsBytes, "[")
 activeQueryTracker := ActiveQueryTracker{
-mmapedFile: fileAsBytes,
+mmappedFile: fileAsBytes,
 closer: closer,
 getNextIndex: make(chan int, maxConcurrent),
 logger: logger,
@@ -206,14 +206,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
 }
 func (tracker ActiveQueryTracker) Delete(insertIndex int) {
-copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
+copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize))
 tracker.getNextIndex <- insertIndex
 }
 func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
 select {
 case i := <-tracker.getNextIndex:
-fileBytes := tracker.mmapedFile
+fileBytes := tracker.mmappedFile
 entry := newJSONEntry(query, tracker.logger)
 start, end := i, i+entrySize

@@ -26,7 +26,7 @@ import (
 func TestQueryLogging(t *testing.T) {
 fileAsBytes := make([]byte, 4096)
 queryLogger := ActiveQueryTracker{
-mmapedFile: fileAsBytes,
+mmappedFile: fileAsBytes,
 logger: nil,
 getNextIndex: make(chan int, 4),
 }
@@ -70,7 +70,7 @@ func TestQueryLogging(t *testing.T) {
 func TestIndexReuse(t *testing.T) {
 queryBytes := make([]byte, 1+3*entrySize)
 queryLogger := ActiveQueryTracker{
-mmapedFile: queryBytes,
+mmappedFile: queryBytes,
 logger: nil,
 getNextIndex: make(chan int, 3),
 }
@@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) {
 func TestMMapFile(t *testing.T) {
 dir := t.TempDir()
-fpath := filepath.Join(dir, "mmapedFile")
+fpath := filepath.Join(dir, "mmappedFile")
 const data = "ab"
-fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
+fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil)
 require.NoError(t, err)
 copy(fileAsBytes, data)
 require.NoError(t, closer.Close())

@@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
 ssi.currH = p.H
 return chunkenc.ValFloatHistogram
 default:
-panic("storageSeriesIterater.Next failed to pick value type")
+panic("storageSeriesIterator.Next failed to pick value type")
 }
 }

@@ -188,7 +188,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo
 return ok
 }
-// Queryable returns the group's querable.
+// Queryable returns the group's queryable.
 func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
 // Context returns the group's context.

@@ -1186,7 +1186,7 @@ scrape_configs:
 }
 // TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
-// longer discover targets, only the stale targets of that provier are dropped.
+// longer discover targets, only the stale targets of that provider are dropped.
 func TestOnlyStaleTargetsAreDropped(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()

@@ -241,9 +241,9 @@ type sampleRing struct {
 delta int64
 // Lookback buffers. We use iBuf for mixed samples, but one of the three
-// concrete ones for homogenous samples. (Only one of the four bufs is
+// concrete ones for homogeneous samples. (Only one of the four bufs is
 // allowed to be populated!) This avoids the overhead of the interface
-// wrapper for the happy (and by far most common) case of homogenous
+// wrapper for the happy (and by far most common) case of homogeneous
 // samples.
 iBuf []chunks.Sample
 fBuf []fSample
@@ -268,7 +268,7 @@ const (
 fhBuf
 )
-// newSampleRing creates a new sampleRing. If you do not know the prefereed
+// newSampleRing creates a new sampleRing. If you do not know the preferred
 // value type yet, use a size of 0 (in which case the provided typ doesn't
 // matter). On the first add, a buffer of size 16 will be allocated with the
 // preferred type being the type of the first added sample.

@@ -68,7 +68,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
 cases := []struct {
 cfg *AzureADConfig
 }{
-// AzureAd roundtripper with Managedidentity.
+// AzureAd roundtripper with ManagedIdentity.
 {
 cfg: &AzureADConfig{
 Cloud: "AzurePublic",

@@ -171,7 +171,7 @@ func TestConvertBucketsLayout(t *testing.T) {
 },
 // Downscale:
 // 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1
-// Check from sclaing from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
+// Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
 wantDeltas: []int64{8, -7},
 },
 },
@@ -222,7 +222,7 @@ func TestConvertBucketsLayout(t *testing.T) {
 },
 // Downscale:
 // 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1
-// Check from sclaing from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
+// Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
 wantDeltas: []int64{8, -8, 0, 1},
 },
 },

@@ -2027,7 +2027,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
 // make the problem worse, particularly if we're getting rate limited.
 //
 // reshardDisableTimestamp holds the unix timestamp until which resharding
-// is diableld. We'll update that timestamp if the period we were just told
+// is disabled. We'll update that timestamp if the period we were just told
 // to sleep for is newer than the existing disabled timestamp.
 reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2)
 if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated {

@@ -351,7 +351,7 @@ func TestMetadataDelivery(t *testing.T) {
 require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal))
 require.Len(t, c.receivedMetadata, numMetadata)
-// One more write than the rounded qoutient should be performed in order to get samples that didn't
+// One more write than the rounded quotient should be performed in order to get samples that didn't
 // fit into MaxSamplesPerSend.
 require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived)
 // Make sure the last samples were sent.

@@ -308,7 +308,7 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,
 func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
 // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
-// UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now.
+// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
 return 0, nil
 }

@@ -335,7 +335,7 @@ func validateOptions(opts *Options) *Options {
 opts.WALCompression = wlog.CompressionNone
 }
-// Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2.
+// Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2.
 if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
 opts.StripeSize = tsdb.DefaultStripeSize
 }
@@ -395,7 +395,7 @@ func (db *DB) replayWAL() error {
 return fmt.Errorf("finding WAL segments: %w", err)
 }
-// Backfil segments from the most recent checkpoint onwards.
+// Backfill segments from the most recent checkpoint onwards.
 for i := startFrom; i <= last; i++ {
 seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
 if err != nil {
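
For context: the StripeSize validation in the first hunk above relies on the standard bit trick for testing powers of two. A tiny, self-contained illustration follows; the names are invented and it is not taken from the Prometheus codebase.

package main

import "fmt"

// isPowerOfTwo reports whether n is a positive power of two.
// n&(n-1) clears the lowest set bit, so the result is zero exactly
// when n has a single bit set.
func isPowerOfTwo(n int) bool {
	return n > 0 && n&(n-1) == 0
}

func main() {
	fmt.Println(isPowerOfTwo(16384)) // true: kept as a valid stripe size
	fmt.Println(isPowerOfTwo(1000))  // false: would be reset to the default
}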

@@ -14,7 +14,7 @@
 // The code in this file was largely written by Damian Gryski as part of
 // https://github.com/dgryski/go-tsz and published under the license below.
 // It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
+// the underlying bytes, which would panic when reading from mmapped
 // read-only byte slices.
 package chunkenc

@@ -14,7 +14,7 @@
 // The code in this file was largely written by Damian Gryski as part of
 // https://github.com/dgryski/go-tsz and published under the license below.
 // It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
+// the underlying bytes, which would panic when reading from mmapped
 // read-only byte slices.
 // Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>

@@ -24,7 +24,7 @@ import (
 )
 const (
-// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again.
+// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again.
 chunkRefMapShrinkThreshold = 1000
 // Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.

@@ -1371,7 +1371,7 @@ func TestCancelCompactions(t *testing.T) {
 }
 // TestDeleteCompactionBlockAfterFailedReload ensures that a failed reloadBlocks immediately after a compaction
-// deletes the resulting block to avoid creatings blocks with the same time range.
+// deletes the resulting block to avoid creating blocks with the same time range.
 func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
 tests := map[string]func(*DB) int{
 "Test Head Compaction": func(db *DB) int {
@@ -2114,7 +2114,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
 t.Parallel()
 tmpdir := t.TempDir()
-// Some blocks that need compation are present.
+// Some blocks that need compaction are present.
 createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
 createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
 createBlock(t, tmpdir, genSeries(1, 1, 200, 300))

@@ -245,8 +245,8 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 func TestNoPanicAfterWALCorruption(t *testing.T) {
 db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil)
-// Append until the first mmaped head chunk.
-// This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted.
+// Append until the first mmapped head chunk.
+// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
 var expSamples []chunks.Sample
 var maxt int64
 ctx := context.Background()
@@ -265,7 +265,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
 // Corrupt the WAL after the first sample of the series so that it has at least one sample and
 // it is not garbage collected.
-// The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk.
+// The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk.
 {
 walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
 require.NoError(t, err)
@@ -2650,7 +2650,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
 spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
 dBDirHash := dirHash(dir)
-// Bootsrap a RO db from the same dir and set up a querier.
+// Bootstrap a RO db from the same dir and set up a querier.
 dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
 require.NoError(t, err)
 require.Equal(t, chunksCount, countChunks(dir))
@@ -2669,7 +2669,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
 require.NoError(t, db.Close())
 }()
-// Append until the first mmaped head chunk.
+// Append until the first mmapped head chunk.
 for i := 0; i < 121; i++ {
 app := db.Appender(context.Background())
 _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0)
@@ -5156,7 +5156,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 },
 },
 {
-name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
+name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
 oooCap: 5,
 queryMinT: minutes(0),
 queryMaxT: minutes(200),
@@ -5169,7 +5169,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 },
 {
 minT: minutes(101),
-maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
+maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
 filter: func(t int64) bool { return t%2 == 1 },
 isOOO: true,
 },
@@ -5182,7 +5182,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 },
 },
 {
-name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
+name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
 oooCap: 30,
 queryMinT: minutes(0),
 queryMaxT: minutes(200),
@@ -5195,7 +5195,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 },
 {
 minT: minutes(101),
-maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
+maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
 filter: func(t int64) bool { return t%2 == 1 },
 isOOO: true,
 },
@@ -5367,7 +5367,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 },
 },
 {
-name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
+name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
 oooCap: 5,
 queryMinT: minutes(0),
 queryMaxT: minutes(200),
@@ -5380,7 +5380,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 },
 {
 minT: minutes(101),
-maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
+maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
 filter: func(t int64) bool { return t%2 == 1 },
 isOOO: true,
 },
@@ -5393,7 +5393,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 },
 },
 {
-name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
+name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
 oooCap: 30,
 queryMinT: minutes(0),
 queryMaxT: minutes(200),
@@ -5406,7 +5406,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 },
 {
 minT: minutes(101),
-maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
+maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
 filter: func(t int64) bool { return t%2 == 1 },
 isOOO: true,
 },
@@ -5555,7 +5555,7 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
 addSample(s2, 255, 265, false)
 verifyOOOMinMaxTimes(250, 265)
 testQuery(math.MinInt64, math.MaxInt64)
-testQuery(minutes(250), minutes(265)) // Test querying ono data time range
+testQuery(minutes(250), minutes(265)) // Test querying ooo data time range
 testQuery(minutes(290), minutes(300)) // Test querying in-order data time range
 testQuery(minutes(250), minutes(300)) // Test querying the entire range
@@ -7468,7 +7468,7 @@ func TestAbortBlockCompactions(t *testing.T) {
 defer func() {
 require.NoError(t, db.Close())
 }()
-// It should NOT be compactible at the beginning of the test
+// It should NOT be compactable at the beginning of the test
 require.False(t, db.head.compactable(), "head should NOT be compactable")
 // Track the number of compactions run inside db.compactBlocks()
@@ -7478,7 +7478,7 @@ func TestAbortBlockCompactions(t *testing.T) {
 db.compactor = &mockCompactorFn{
 planFn: func() ([]string, error) {
 // On every Plan() run increment compactions. After 4 compactions
-// update HEAD to make it compactible to force an exit from db.compactBlocks() loop.
+// update HEAD to make it compactable to force an exit from db.compactBlocks() loop.
 compactions++
 if compactions > 3 {
 chunkRange := db.head.chunkRange.Load()

@@ -29,7 +29,7 @@ import (
 )
 const (
-// Indicates that there is no index entry for an exmplar.
+// Indicates that there is no index entry for an exemplar.
 noExemplar = -1
 // Estimated number of exemplars per series, for sizing the index.
 estimatedExemplarsPerSeries = 16

@@ -2090,7 +2090,7 @@ type memSeries struct {
 // before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5
 // after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7
 //
-// pN is the pointer to the mmappedChunk referered to by HeadChunkID=N
+// pN is the pointer to the mmappedChunk referred to by HeadChunkID=N
 mmappedChunks []*mmappedChunk
 // Most recent chunks in memory that are still being built or waiting to be mmapped.
 // This is a linked list, headChunks points to the most recent chunk, headChunks.next points

@@ -1060,7 +1060,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
 tests := []struct {
 name string
-headChunks int // the number of head chubks to create on memSeries by appending enough samples
+headChunks int // the number of head chunks to create on memSeries by appending enough samples
 mmappedChunks int // the number of mmapped chunks to create on memSeries by appending enough samples
 truncateBefore int64 // the mint to pass to truncateChunksBefore()
 expectedTruncated int // the number of chunks that we're expecting be truncated and returned by truncateChunksBefore()

@@ -3235,7 +3235,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri
 }
 func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
-return nil, errors.New("label names for for called")
+return nil, errors.New("label names for called")
 }
 func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) {

@@ -166,7 +166,7 @@ func TestRecord_EncodeDecode(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, floatHistograms, decFloatHistograms)
-// Gauge ingeger histograms.
+// Gauge integer histograms.
 for i := range histograms {
 histograms[i].H.CounterResetHint = histogram.GaugeType
 }

@@ -113,7 +113,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue())
 case m.Summary != nil:
 metricName := labels[model.MetricNameLabel]
-// Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie
+// Preserve metric name order with first quantile labels timeseries then sum suffix timeseries and finally count suffix timeseries
 // Add Summary quantile timeseries
 quantileLabels := make(map[string]string, len(labels)+1)
 for key, value := range labels {
@@ -125,16 +125,16 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 toTimeseries(wr, quantileLabels, timestamp, q.GetValue())
 }
 // Overwrite label model.MetricNameLabel for count and sum metrics
-// Add Summary sum timeserie
+// Add Summary sum timeseries
 labels[model.MetricNameLabel] = metricName + sumStr
 toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum())
-// Add Summary count timeserie
+// Add Summary count timeseries
 labels[model.MetricNameLabel] = metricName + countStr
 toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount()))
 case m.Histogram != nil:
 metricName := labels[model.MetricNameLabel]
-// Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie
+// Preserve metric name order with first bucket suffix timeseries then sum suffix timeseries and finally count suffix timeseries
 // Add Histogram bucket timeseries
 bucketLabels := make(map[string]string, len(labels)+1)
 for key, value := range labels {
@@ -146,10 +146,10 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount()))
 }
 // Overwrite label model.MetricNameLabel for count and sum metrics
-// Add Histogram sum timeserie
+// Add Histogram sum timeseries
 labels[model.MetricNameLabel] = metricName + sumStr
 toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum())
-// Add Histogram count timeserie
+// Add Histogram count timeseries
 labels[model.MetricNameLabel] = metricName + countStr
 toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount()))

@@ -23,7 +23,7 @@ import (
 // syscall.RLIM_INFINITY is a constant.
 // Its type is int on most architectures but there are exceptions such as loong64.
-// Uniform it to uint accorind to the standard.
+// Uniform it to uint according to the standard.
 // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html
 var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64
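
For context: the masking in the line above matters on platforms where RLIM_INFINITY is a signed all-ones value. A hypothetical, self-contained illustration of the same trick, using a stand-in constant rather than the real syscall value:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Stand-in for syscall.RLIM_INFINITY on platforms where it is defined as -1.
	const rlimInfinity = -1
	// ANDing with math.MaxUint64 keeps the all-ones bit pattern while making
	// the constant expression representable as a uint64.
	var unlimited uint64 = rlimInfinity & math.MaxUint64
	fmt.Println(unlimited == math.MaxUint64) // true
}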

@@ -81,7 +81,7 @@ func TestPool(t *testing.T) {
 t.Run("does not allocate", func(t *testing.T) {
 pool := zeropool.New(func() []byte { return make([]byte, 1024) })
-// Warm up, this will alloate one slice.
+// Warm up, this will allocate one slice.
 slice := pool.Get()
 pool.Put(slice)

@@ -4034,13 +4034,13 @@ func TestGetGlobalURL(t *testing.T) {
 false,
 },
 {
-mustParseURL(t, "http://exemple.com"),
+mustParseURL(t, "http://example.com"),
 GlobalURLOptions{
 ListenAddress: "127.0.0.1:9090",
 Host: "prometheus.io",
 Scheme: "https",
 },
-mustParseURL(t, "http://exemple.com"),
+mustParseURL(t, "http://example.com"),
 false,
 },
 {

@@ -528,7 +528,7 @@ describe('promql operations', () => {
 },
 ],
 },
-// test aggregration
+// test aggregation
 {
 expr: 'sum by (foo)(some_metric)',
 expectedValueType: ValueType.vector,

@@ -136,7 +136,7 @@ class Panel extends Component<PanelProps, PanelState> {
 this.abortInFlightFetch = () => abortController.abort();
 this.setState({ loading: true });
-const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueof only work when it's a moment?
+const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueOf only work when it's a moment?
 const startTime = endTime - this.props.options.range / 1000;
 const resolution = this.props.options.resolution || Math.max(Math.floor(this.props.options.range / 250000), 1);
 const params: URLSearchParams = new URLSearchParams({