From 50cd453c8fc5d7fd9270fc72dfa50367fc7a9c23 Mon Sep 17 00:00:00 2001
From: Nathan Baulch
Date: Wed, 11 Sep 2024 06:32:03 +1000
Subject: [PATCH] chore: Fix typos (#14868)

* Fix typos

---------

Signed-off-by: Nathan Baulch
---
 CHANGELOG.md                                  |  8 ++---
 README.md                                     |  2 +-
 cmd/promtool/tsdb.go                          | 10 +++---
 discovery/moby/mock_test.go                   |  2 +-
 discovery/uyuni/uyuni.go                      | 16 +++++-----
 docs/configuration/configuration.md           |  6 ++--
 docs/storage.md                               |  2 +-
 .../examples/prometheus-ovhcloud.yml          |  2 +-
 model/histogram/float_histogram.go            |  6 ++--
 model/textparse/promparse_test.go             |  2 +-
 model/textparse/protobufparse.go              |  2 +-
 notifier/notifier_test.go                     |  2 +-
 promql/engine_test.go                         |  2 +-
 promql/query_logger.go                        | 22 ++++++-------
 promql/query_logger_test.go                   |  8 ++---
 promql/value.go                               |  2 +-
 rules/group.go                                |  2 +-
 scrape/manager_test.go                        |  2 +-
 storage/buffer.go                             |  6 ++--
 storage/remote/azuread/azuread_test.go        |  2 +-
 .../prometheusremotewrite/histograms_test.go  |  4 +--
 storage/remote/queue_manager.go               |  2 +-
 storage/remote/queue_manager_test.go          |  2 +-
 storage/remote/write.go                       |  2 +-
 tsdb/agent/db.go                              |  4 +--
 tsdb/chunkenc/histogram_meta_test.go          |  2 +-
 tsdb/chunkenc/xor.go                          |  2 +-
 tsdb/chunks/chunk_write_queue.go              |  2 +-
 tsdb/compact_test.go                          |  4 +--
 tsdb/db_test.go                               | 32 +++++++++----------
 tsdb/exemplar.go                              |  2 +-
 tsdb/head.go                                  |  2 +-
 tsdb/head_test.go                             |  2 +-
 tsdb/querier_test.go                          |  2 +-
 tsdb/record/record_test.go                    |  2 +-
 util/fmtutil/format.go                        | 12 +++---
 util/runtime/limits_default.go                |  2 +-
 util/zeropool/pool_test.go                    |  2 +-
 web/api/v1/api_test.go                        |  4 +--
 .../src/parser/parser.test.ts                 |  2 +-
 web/ui/react-app/src/pages/graph/Panel.tsx    |  2 +-
 41 files changed, 98 insertions(+), 98 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f0daf72f..49aeeb50e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -140,7 +140,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@@ -677,7 +677,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2
 
 ## 2.33.0 / 2022-01-29
 
-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@@ -914,7 +914,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@@ -1840,7 +1840,7 @@ information, read the announcement blog post and migration guide.
 ## 1.7.0 / 2017-06-06
 
 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.
diff --git a/README.md b/README.md
index df974e109..7528147b0 100644
--- a/README.md
+++ b/README.md
@@ -115,7 +115,7 @@ The Makefile provides several targets:
 
 Prometheus is bundled with many service discovery plugins.
 When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formated list of go
+file to disable some service discoveries. The file is a yaml-formatted list of go
 import path that will be built into the Prometheus binary.
 
 After you have changed the file, you
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 971ea8ab0..e6323d294 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
 		fmt.Fprintf(tw, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
 			meta.ULID,
-			getFormatedTime(meta.MinTime, humanReadable),
-			getFormatedTime(meta.MaxTime, humanReadable),
+			getFormattedTime(meta.MinTime, humanReadable),
+			getFormattedTime(meta.MaxTime, humanReadable),
 			time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
 			meta.Stats.NumSamples,
 			meta.Stats.NumChunks,
 			meta.Stats.NumSeries,
-			getFormatedBytes(b.Size(), humanReadable),
+			getFormattedBytes(b.Size(), humanReadable),
 		)
 	}
 }
 
-func getFormatedTime(timestamp int64, humanReadable bool) string {
+func getFormattedTime(timestamp int64, humanReadable bool) string {
 	if humanReadable {
 		return time.Unix(timestamp/1000, 0).UTC().String()
 	}
 	return strconv.FormatInt(timestamp, 10)
 }
 
-func getFormatedBytes(bytes int64, humanReadable bool) string {
+func getFormattedBytes(bytes int64, humanReadable bool) string {
 	if humanReadable {
 		return units.Base2Bytes(bytes).String()
 	}
diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go
index 3f35258c8..7ef5cb07c 100644
--- a/discovery/moby/mock_test.go
+++ b/discovery/moby/mock_test.go
@@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
 			if len(query) == 2 {
 				h := sha1.New()
 				h.Write([]byte(query[1]))
-				// Avoing long filenames for Windows.
+				// Avoiding long filenames for Windows.
f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] } } diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index c8af2f158..2ab339695 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -41,10 +41,10 @@ const ( uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_" uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname" uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn" - uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id" - uyuniLablelGroups = uyuniMetaLabelPrefix + "groups" - uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" - uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter" + uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id" + uyuniLabelGroups = uyuniMetaLabelPrefix + "groups" + uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name" + uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter" uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module" uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path" uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme" @@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), - uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), - uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), - uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), + uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), + uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), + uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName), + uyuniLabelExporter: model.LabelValue(endpoint.ExporterName), uyuniLabelProxyModule: model.LabelValue(endpoint.Module), uyuniLabelMetricsPath: model.LabelValue(endpoint.Path), uyuniLabelScheme: model.LabelValue(scheme), diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 80bb3afee..a14dd0271 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1407,7 +1407,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -2627,7 +2627,7 @@ authorization: # `credentials_file`. [ credentials: ] # Sets the credentials to the credentials read from the configured file. - # It is mutuall exclusive with `credentials`. + # It is mutually exclusive with `credentials`. [ credentials_file: ] # Optional OAuth 2.0 configuration, currently not supported by AWS. @@ -3988,7 +3988,7 @@ azuread: # Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread. # To use the default credentials from the Google Cloud SDK, use `google_iam: {}`. google_iam: - # Service account key with monitoring write permessions. + # Service account key with monitoring write permissions. credentials_file: # Configures the remote write request's TLS settings. 
diff --git a/docs/storage.md b/docs/storage.md
index d5193bf5b..2142c970f 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -144,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora
 for Prometheus becomes full.
 
 At present, we recommend setting the retention size to, at most, 80-85% of your
-allocated Prometheus disk space. This increases the likelihood that older entires
+allocated Prometheus disk space. This increases the likelihood that older entries
 will be removed prior to hitting any disk limitations.
 
 ## Remote storage integrations
diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml
index 21facad1c..b2cc60af2 100644
--- a/documentation/examples/prometheus-ovhcloud.yml
+++ b/documentation/examples/prometheus-ovhcloud.yml
@@ -1,4 +1,4 @@
-# An example scrape configuration for running Prometheus with Ovhcloud.
+# An example scrape configuration for running Prometheus with OVHcloud.
 scrape_configs:
   - job_name: 'ovhcloud'
     ovhcloud_sd_configs:
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 2a37ea66d..af9bb5baa 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -342,7 +342,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
 	default:
 		// All other cases shouldn't actually happen.
 		// They are a direct collision of CounterReset and NotCounterReset.
-		// Conservatively set the CounterResetHint to "unknown" and isse a warning.
+		// Conservatively set the CounterResetHint to "unknown" and issue a warning.
 		h.CounterResetHint = UnknownCounterReset
 		// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
 	}
@@ -658,7 +658,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool {
 		if !currIt.Next() {
 			// Reached end of currIt early, therefore
 			// previous histogram has a bucket that the
-			// current one does not have. Unlass all
+			// current one does not have. Unless all
 			// remaining buckets in the previous histogram
 			// are unpopulated, this is a reset.
 			for {
@@ -891,7 +891,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
 // reconcileZeroBuckets finds a zero bucket large enough to include the zero
 // buckets of both histograms (the receiving histogram and the other histogram)
 // with a zero threshold that is not within a populated bucket in either
-// histogram. This method modifies the receiving histogram accourdingly, but
+// histogram. This method modifies the receiving histogram accordingly, but
 // leaves the other histogram as is. Instead, it returns the zero count the
 // other histogram would have if it were modified.
 func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 7971d23b7..992140ce0 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -140,7 +140,7 @@ testmetric{label="\"bar\""} 1`
 		v: 8.3835e-05,
 		lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
 	}, {
-		// NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed.
+		// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
 		m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
 		v: 8.3835e-05,
 		lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index e384a75fc..9f1400cee 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -604,7 +604,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
 	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
 }
 
-// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
 // but appends ".0" if the resulting number would otherwise contain neither a
 // "." nor an "e".
 func formatOpenMetricsFloat(f float64) string {
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go
index cf922a537..68dd44581 100644
--- a/notifier/notifier_test.go
+++ b/notifier/notifier_test.go
@@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) {
 
 	// Initialize the discovery manager
 	// This is relevant as the updates aren't sent continually in real life, but only each updatert.
-	// The old implementation of TestHangingNotifier didn't take that into acount.
+	// The old implementation of TestHangingNotifier didn't take that into account.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	reg := prometheus.NewRegistry()
diff --git a/promql/engine_test.go b/promql/engine_test.go
index d5daa72af..94fa8d0b4 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -1467,7 +1467,7 @@ load 10s
 		},
 		{
 			// Nested subquery.
-			// Now the outmost subquery produces more samples than inner most rate.
+			// Now the outermost subquery produces more samples than inner most rate.
 			Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
 			MaxSamples: 36,
 			Start: time.Unix(10, 0),
diff --git a/promql/query_logger.go b/promql/query_logger.go
index 7e06ebb97..cb3d40519 100644
--- a/promql/query_logger.go
+++ b/promql/query_logger.go
@@ -31,7 +31,7 @@ import (
 )
 
 type ActiveQueryTracker struct {
-	mmapedFile []byte
+	mmappedFile []byte
 	getNextIndex chan int
 	logger log.Logger
 	closer io.Closer
@@ -87,24 +87,24 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
 	}
 }
 
-type mmapedFile struct {
+type mmappedFile struct {
 	f io.Closer
 	m mmap.MMap
 }
 
-func (f *mmapedFile) Close() error {
+func (f *mmappedFile) Close() error {
 	err := f.m.Unmap()
 	if err != nil {
-		err = fmt.Errorf("mmapedFile: unmapping: %w", err)
+		err = fmt.Errorf("mmappedFile: unmapping: %w", err)
 	}
 
 	if fErr := f.f.Close(); fErr != nil {
-		return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
+		return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err)
 	}
 
 	return err
 }
 
-func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
+func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
 	file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
 	if err != nil {
 		absPath, pathErr := filepath.Abs(filename)
@@ -129,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
 		return nil, nil, err
 	}
 
-	return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
+	return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
 }
 
 func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@@ -141,14 +141,14 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo
 	filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
 	logUnfinishedQueries(filename, filesize, logger)
 
-	fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger)
+	fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger)
 	if err != nil {
 		panic("Unable to create mmap-ed active query log")
 	}
 
 	copy(fileAsBytes, "[")
 	activeQueryTracker := ActiveQueryTracker{
-		mmapedFile: fileAsBytes,
+		mmappedFile: fileAsBytes,
 		closer: closer,
 		getNextIndex: make(chan int, maxConcurrent),
 		logger: logger,
@@ -206,14 +206,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
 }
 
 func (tracker ActiveQueryTracker) Delete(insertIndex int) {
-	copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
+	copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize))
 	tracker.getNextIndex <- insertIndex
 }
 
 func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
 	select {
 	case i := <-tracker.getNextIndex:
-		fileBytes := tracker.mmapedFile
+		fileBytes := tracker.mmappedFile
 		entry := newJSONEntry(query, tracker.logger)
 		start, end := i, i+entrySize
diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go
index 7bd93781e..eb06e513e 100644
--- a/promql/query_logger_test.go
+++ b/promql/query_logger_test.go
@@ -26,7 +26,7 @@ import (
 func TestQueryLogging(t *testing.T) {
 	fileAsBytes := make([]byte, 4096)
 	queryLogger := ActiveQueryTracker{
-		mmapedFile: fileAsBytes,
+		mmappedFile: fileAsBytes,
 		logger: nil,
 		getNextIndex: make(chan int, 4),
 	}
@@ -70,7 +70,7 @@ func TestIndexReuse(t *testing.T) {
 	queryBytes := make([]byte, 1+3*entrySize)
 	queryLogger := ActiveQueryTracker{
-		mmapedFile: queryBytes,
+		mmappedFile: queryBytes,
 		logger: nil,
 		getNextIndex: make(chan int, 3),
 	}
@@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) {
 
 func TestMMapFile(t *testing.T) {
 	dir := t.TempDir()
 
-	fpath := filepath.Join(dir, "mmapedFile")
+	fpath := filepath.Join(dir, "mmappedFile")
 	const data = "ab"
 
-	fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
+	fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil)
 	require.NoError(t, err)
 	copy(fileAsBytes, data)
 	require.NoError(t, closer.Close())
diff --git a/promql/value.go b/promql/value.go
index f25dbcd78..f19c0b5b5 100644
--- a/promql/value.go
+++ b/promql/value.go
@@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
 		ssi.currH = p.H
 		return chunkenc.ValFloatHistogram
 	default:
-		panic("storageSeriesIterater.Next failed to pick value type")
+		panic("storageSeriesIterator.Next failed to pick value type")
 	}
 }
 
diff --git a/rules/group.go b/rules/group.go
index 201d3a67d..6e98bf52f 100644
--- a/rules/group.go
+++ b/rules/group.go
@@ -188,7 +188,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo
 	return ok
 }
 
-// Queryable returns the group's querable.
+// Queryable returns the group's queryable.
 func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
 
 // Context returns the group's context.
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index 3a1d1e1dd..cd712ca62 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -1186,7 +1186,7 @@ scrape_configs:
 }
 
 // TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
-// longer discover targets, only the stale targets of that provier are dropped.
+// longer discover targets, only the stale targets of that provider are dropped.
 func TestOnlyStaleTargetsAreDropped(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
diff --git a/storage/buffer.go b/storage/buffer.go
index 651e5c83e..9f31fb53f 100644
--- a/storage/buffer.go
+++ b/storage/buffer.go
@@ -241,9 +241,9 @@ type sampleRing struct {
 	delta int64
 
 	// Lookback buffers. We use iBuf for mixed samples, but one of the three
-	// concrete ones for homogenous samples. (Only one of the four bufs is
+	// concrete ones for homogeneous samples. (Only one of the four bufs is
 	// allowed to be populated!) This avoids the overhead of the interface
-	// wrapper for the happy (and by far most common) case of homogenous
+	// wrapper for the happy (and by far most common) case of homogeneous
 	// samples.
 	iBuf []chunks.Sample
 	fBuf []fSample
@@ -268,7 +268,7 @@ const (
 	fhBuf
 )
 
-// newSampleRing creates a new sampleRing. If you do not know the prefereed
+// newSampleRing creates a new sampleRing. If you do not know the preferred
 // value type yet, use a size of 0 (in which case the provided typ doesn't
 // matter). On the first add, a buffer of size 16 will be allocated with the
 // preferred type being the type of the first added sample.
diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go
index 7c9713812..08870382e 100644
--- a/storage/remote/azuread/azuread_test.go
+++ b/storage/remote/azuread/azuread_test.go
@@ -68,7 +68,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
 	cases := []struct {
 		cfg *AzureADConfig
 	}{
-		// AzureAd roundtripper with Managedidentity.
+		// AzureAd roundtripper with ManagedIdentity.
 		{
 			cfg: &AzureADConfig{
 				Cloud: "AzurePublic",
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
index e064ab28a..bd0e777e8 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
@@ -171,7 +171,7 @@ func TestConvertBucketsLayout(t *testing.T) {
 				},
 				// Downscale:
 				// 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1
-				// Check from sclaing from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
+				// Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
 				wantDeltas: []int64{8, -7},
 			},
 		},
@@ -222,7 +222,7 @@ func TestConvertBucketsLayout(t *testing.T) {
 				},
 				// Downscale:
 				// 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1
-				// Check from sclaing from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
+				// Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
 				wantDeltas: []int64{8, -8, 0, 1},
 			},
 		},
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index b1c899726..32cb6c498 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -2027,7 +2027,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
 			// make the problem worse, particularly if we're getting rate limited.
 			//
 			// reshardDisableTimestamp holds the unix timestamp until which resharding
-			// is diableld. We'll update that timestamp if the period we were just told
+			// is disabled. We'll update that timestamp if the period we were just told
 			// to sleep for is newer than the existing disabled timestamp.
 			reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2)
 			if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated {
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 032a1a92f..38f3fbea2 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -351,7 +351,7 @@ func TestMetadataDelivery(t *testing.T) {
 	require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal))
 	require.Len(t, c.receivedMetadata, numMetadata)
 
-	// One more write than the rounded qoutient should be performed in order to get samples that didn't
+	// One more write than the rounded quotient should be performed in order to get samples that didn't
 	// fit into MaxSamplesPerSend.
 	require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived)
 	// Make sure the last samples were sent.
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 3d2f1fdfc..eba429084 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -308,7 +308,7 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,
 
 func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
 	// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
-	// UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now.
+	// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
 	return 0, nil
 }
 
diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go
index 9697739e0..596d5c8a3 100644
--- a/tsdb/agent/db.go
+++ b/tsdb/agent/db.go
@@ -335,7 +335,7 @@ func validateOptions(opts *Options) *Options {
 		opts.WALCompression = wlog.CompressionNone
 	}
 
-	// Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2.
+	// Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2.
 	if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
 		opts.StripeSize = tsdb.DefaultStripeSize
 	}
@@ -395,7 +395,7 @@ func (db *DB) replayWAL() error {
 		return fmt.Errorf("finding WAL segments: %w", err)
 	}
 
-	// Backfil segments from the most recent checkpoint onwards.
+	// Backfill segments from the most recent checkpoint onwards.
 	for i := startFrom; i <= last; i++ {
 		seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
 		if err != nil {
diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go
index fdbd1825a..1774dee86 100644
--- a/tsdb/chunkenc/histogram_meta_test.go
+++ b/tsdb/chunkenc/histogram_meta_test.go
@@ -14,7 +14,7 @@
 // The code in this file was largely written by Damian Gryski as part of
 // https://github.com/dgryski/go-tsz and published under the license below.
 // It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
+// the underlying bytes, which would panic when reading from mmapped
 // read-only byte slices.
 
 package chunkenc
diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go
index 3177762f8..ec5db39ad 100644
--- a/tsdb/chunkenc/xor.go
+++ b/tsdb/chunkenc/xor.go
@@ -14,7 +14,7 @@
 // The code in this file was largely written by Damian Gryski as part of
 // https://github.com/dgryski/go-tsz and published under the license below.
 // It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
+// the underlying bytes, which would panic when reading from mmapped
 // read-only byte slices.
 
 // Copyright (c) 2015,2016 Damian Gryski
diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go
index 6d2dc743b..ba9730d93 100644
--- a/tsdb/chunks/chunk_write_queue.go
+++ b/tsdb/chunks/chunk_write_queue.go
@@ -24,7 +24,7 @@ import (
 )
 
 const (
-	// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again.
+	// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again.
 	chunkRefMapShrinkThreshold = 1000
 
 	// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index e7998abf7..939e93350 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1371,7 +1371,7 @@ func TestCancelCompactions(t *testing.T) {
 }
 
 // TestDeleteCompactionBlockAfterFailedReload ensures that a failed reloadBlocks immediately after a compaction
-// deletes the resulting block to avoid creatings blocks with the same time range.
+// deletes the resulting block to avoid creating blocks with the same time range.
 func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
 	tests := map[string]func(*DB) int{
 		"Test Head Compaction": func(db *DB) int {
@@ -2114,7 +2114,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
 	t.Parallel()
 
 	tmpdir := t.TempDir()
-	// Some blocks that need compation are present.
+	// Some blocks that need compaction are present.
 	createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
 	createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
 	createBlock(t, tmpdir, genSeries(1, 1, 200, 300))
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 4e3a077f6..8db09d3f4 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -245,8 +245,8 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 
 func TestNoPanicAfterWALCorruption(t *testing.T) {
 	db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil)
 
-	// Append until the first mmaped head chunk.
-	// This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted.
+	// Append until the first mmapped head chunk.
+	// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
 	var expSamples []chunks.Sample
 	var maxt int64
 	ctx := context.Background()
@@ -265,7 +265,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
 
 	// Corrupt the WAL after the first sample of the series so that it has at least one sample and
 	// it is not garbage collected.
-	// The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk.
+	// The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk.
 	{
 		walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
 		require.NoError(t, err)
@@ -2650,7 +2650,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
 
 	spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
 		dBDirHash := dirHash(dir)
-		// Bootsrap a RO db from the same dir and set up a querier.
+		// Bootstrap a RO db from the same dir and set up a querier.
 		dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
 		require.NoError(t, err)
 		require.Equal(t, chunksCount, countChunks(dir))
@@ -2669,7 +2669,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
 		require.NoError(t, db.Close())
 	}()
 
-	// Append until the first mmaped head chunk.
+	// Append until the first mmapped head chunk.
 	for i := 0; i < 121; i++ {
 		app := db.Appender(context.Background())
 		_, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0)
@@ -5156,7 +5156,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 			},
 		},
 		{
-			name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
 			oooCap: 5,
 			queryMinT: minutes(0),
 			queryMaxT: minutes(200),
@@ -5169,7 +5169,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 				},
 				{
 					minT: minutes(101),
-					maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
+					maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
 					filter: func(t int64) bool { return t%2 == 1 },
 					isOOO: true,
 				},
@@ -5182,7 +5182,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 			},
 		},
 		{
-			name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
 			oooCap: 30,
 			queryMinT: minutes(0),
 			queryMaxT: minutes(200),
@@ -5195,7 +5195,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
 				},
 				{
 					minT: minutes(101),
-					maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
+					maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
 					filter: func(t int64) bool { return t%2 == 1 },
 					isOOO: true,
 				},
@@ -5367,7 +5367,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 			},
 		},
 		{
-			name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
 			oooCap: 5,
 			queryMinT: minutes(0),
 			queryMaxT: minutes(200),
@@ -5380,7 +5380,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 				},
 				{
 					minT: minutes(101),
-					maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
+					maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
 					filter: func(t int64) bool { return t%2 == 1 },
 					isOOO: true,
 				},
@@ -5393,7 +5393,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 			},
 		},
 		{
-			name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
+			name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
 			oooCap: 30,
 			queryMinT: minutes(0),
 			queryMaxT: minutes(200),
@@ -5406,7 +5406,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 				},
 				{
 					minT: minutes(101),
-					maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
+					maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
 					filter: func(t int64) bool { return t%2 == 1 },
 					isOOO: true,
 				},
@@ -5555,7 +5555,7 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
 	addSample(s2, 255, 265, false)
 	verifyOOOMinMaxTimes(250, 265)
 	testQuery(math.MinInt64, math.MaxInt64)
-	testQuery(minutes(250), minutes(265)) // Test querying ono data time range
+	testQuery(minutes(250), minutes(265)) // Test querying ooo data time range
 	testQuery(minutes(290), minutes(300)) // Test querying in-order data time range
 	testQuery(minutes(250), minutes(300)) // Test querying the entire range
 
@@ -7468,7 +7468,7 @@ func TestAbortBlockCompactions(t *testing.T) {
 	defer func() {
 		require.NoError(t, db.Close())
 	}()
-	// It should NOT be compactible at the beginning of the test
+	// It should NOT be compactable at the beginning of the test
 	require.False(t, db.head.compactable(), "head should NOT be compactable")
 
 	// Track the number of compactions run inside db.compactBlocks()
@@ -7478,7 +7478,7 @@ func TestAbortBlockCompactions(t *testing.T) {
 	db.compactor = &mockCompactorFn{
 		planFn: func() ([]string, error) {
 			// On every Plan() run increment compactions. After 4 compactions
-			// update HEAD to make it compactible to force an exit from db.compactBlocks() loop.
+			// update HEAD to make it compactable to force an exit from db.compactBlocks() loop.
 			compactions++
 			if compactions > 3 {
 				chunkRange := db.head.chunkRange.Load()
diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go
index 7545ab9a6..d32870f70 100644
--- a/tsdb/exemplar.go
+++ b/tsdb/exemplar.go
@@ -29,7 +29,7 @@ import (
 )
 
 const (
-	// Indicates that there is no index entry for an exmplar.
+	// Indicates that there is no index entry for an exemplar.
 	noExemplar = -1
 
 	// Estimated number of exemplars per series, for sizing the index.
 	estimatedExemplarsPerSeries = 16
diff --git a/tsdb/head.go b/tsdb/head.go
index 4ff7aab63..fdc5ad0e1 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -2090,7 +2090,7 @@ type memSeries struct {
 	// before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5
 	// after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7
 	//
-	// pN is the pointer to the mmappedChunk referered to by HeadChunkID=N
+	// pN is the pointer to the mmappedChunk referred to by HeadChunkID=N
 	mmappedChunks []*mmappedChunk
 	// Most recent chunks in memory that are still being built or waiting to be mmapped.
 	// This is a linked list, headChunks points to the most recent chunk, headChunks.next points
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index 7b5349cfc..c4e16ae65 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -1060,7 +1060,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
 
 	tests := []struct {
 		name string
-		headChunks int // the number of head chubks to create on memSeries by appending enough samples
+		headChunks int // the number of head chunks to create on memSeries by appending enough samples
 		mmappedChunks int // the number of mmapped chunks to create on memSeries by appending enough samples
 		truncateBefore int64 // the mint to pass to truncateChunksBefore()
 		expectedTruncated int // the number of chunks that we're expecting be truncated and returned by truncateChunksBefore()
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index 50525f65f..0821b2b37 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -3235,7 +3235,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri
 }
 
 func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
-	return nil, errors.New("label names for for called")
+	return nil, errors.New("label names for called")
 }
 
 func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) {
diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go
index da7748e18..f3a657aec 100644
--- a/tsdb/record/record_test.go
+++ b/tsdb/record/record_test.go
@@ -166,7 +166,7 @@ func TestRecord_EncodeDecode(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, floatHistograms, decFloatHistograms)
 
-	// Gauge ingeger histograms.
+	// Gauge integer histograms.
 	for i := range histograms {
 		histograms[i].H.CounterResetHint = histogram.GaugeType
 	}
diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go
index 9034a90fa..a10908bb8 100644
--- a/util/fmtutil/format.go
+++ b/util/fmtutil/format.go
@@ -113,7 +113,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 		toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue())
 	case m.Summary != nil:
 		metricName := labels[model.MetricNameLabel]
-		// Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie
+		// Preserve metric name order with first quantile labels timeseries then sum suffix timeseries and finally count suffix timeseries
 		// Add Summary quantile timeseries
 		quantileLabels := make(map[string]string, len(labels)+1)
 		for key, value := range labels {
@@ -125,16 +125,16 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 			toTimeseries(wr, quantileLabels, timestamp, q.GetValue())
 		}
 		// Overwrite label model.MetricNameLabel for count and sum metrics
-		// Add Summary sum timeserie
+		// Add Summary sum timeseries
 		labels[model.MetricNameLabel] = metricName + sumStr
 		toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum())
-		// Add Summary count timeserie
+		// Add Summary count timeseries
 		labels[model.MetricNameLabel] = metricName + countStr
 		toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount()))
 	case m.Histogram != nil:
 		metricName := labels[model.MetricNameLabel]
-		// Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie
+		// Preserve metric name order with first bucket suffix timeseries then sum suffix timeseries and finally count suffix timeseries
 		// Add Histogram bucket timeseries
 		bucketLabels := make(map[string]string, len(labels)+1)
 		for key, value := range labels {
@@ -146,10 +146,10 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
 			toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount()))
 		}
 		// Overwrite label model.MetricNameLabel for count and sum metrics
-		// Add Histogram sum timeserie
+		// Add Histogram sum timeseries
 		labels[model.MetricNameLabel] = metricName + sumStr
 		toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum())
-		// Add Histogram count timeserie
+		// Add Histogram count timeseries
 		labels[model.MetricNameLabel] = metricName + countStr
 		toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount()))
 
diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go
index 0126adb1a..156747d45 100644
--- a/util/runtime/limits_default.go
+++ b/util/runtime/limits_default.go
@@ -23,7 +23,7 @@ import (
 
 // syscall.RLIM_INFINITY is a constant.
 // Its type is int on most architectures but there are exceptions such as loong64.
-// Uniform it to uint accorind to the standard.
+// Uniform it to uint according to the standard.
 // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html
 var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64
diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go
index fea820022..e9793f64d 100644
--- a/util/zeropool/pool_test.go
+++ b/util/zeropool/pool_test.go
@@ -81,7 +81,7 @@ func TestPool(t *testing.T) {
 	t.Run("does not allocate", func(t *testing.T) {
 		pool := zeropool.New(func() []byte { return make([]byte, 1024) })
 
-		// Warm up, this will alloate one slice.
+		// Warm up, this will allocate one slice.
 		slice := pool.Get()
 		pool.Put(slice)
 
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index ef9d53dd9..8323e143c 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -4034,13 +4034,13 @@ func TestGetGlobalURL(t *testing.T) {
 			false,
 		},
 		{
-			mustParseURL(t, "http://exemple.com"),
+			mustParseURL(t, "http://example.com"),
 			GlobalURLOptions{
 				ListenAddress: "127.0.0.1:9090",
 				Host: "prometheus.io",
 				Scheme: "https",
 			},
-			mustParseURL(t, "http://exemple.com"),
+			mustParseURL(t, "http://example.com"),
 			false,
 		},
 		{
diff --git a/web/ui/module/codemirror-promql/src/parser/parser.test.ts b/web/ui/module/codemirror-promql/src/parser/parser.test.ts
index 2bc7e67ff..fc36fef03 100644
--- a/web/ui/module/codemirror-promql/src/parser/parser.test.ts
+++ b/web/ui/module/codemirror-promql/src/parser/parser.test.ts
@@ -528,7 +528,7 @@ describe('promql operations', () => {
       },
     ],
   },
-  // test aggregration
+  // test aggregation
   {
     expr: 'sum by (foo)(some_metric)',
    expectedValueType: ValueType.vector,
diff --git a/web/ui/react-app/src/pages/graph/Panel.tsx b/web/ui/react-app/src/pages/graph/Panel.tsx
index 1b2956fd7..cb108a5bb 100644
--- a/web/ui/react-app/src/pages/graph/Panel.tsx
+++ b/web/ui/react-app/src/pages/graph/Panel.tsx
@@ -136,7 +136,7 @@ class Panel extends Component {
     this.abortInFlightFetch = () => abortController.abort();
     this.setState({ loading: true });
 
-    const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueof only work when it's a moment?
+    const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueOf only work when it's a moment?
     const startTime = endTime - this.props.options.range / 1000;
     const resolution = this.props.options.resolution || Math.max(Math.floor(this.props.options.range / 250000), 1);
     const params: URLSearchParams = new URLSearchParams({