Merge pull request #14715 from prometheus/beorn7/lint

lint: Revamp our linting rules, mostly around doc comments
Björn Rabenstein 2024-08-22 17:56:40 +02:00 committed by GitHub
commit 5fd2717aec
27 changed files with 93 additions and 64 deletions

View file

@@ -25,15 +25,34 @@ linters:
     - loggercheck
 
 issues:
+  max-issues-per-linter: 0
   max-same-issues: 0
+  # The default exclusions are too aggressive. For one, they
+  # essentially disable any linting on doc comments. We disable
+  # default exclusions here and add exclusions fitting our codebase
+  # further down.
+  exclude-use-default: false
   exclude-files:
     # Skip autogenerated files.
     - ^.*\.(pb|y)\.go$
   exclude-dirs:
-    # Copied it from a different source
+    # Copied it from a different source.
     - storage/remote/otlptranslator/prometheusremotewrite
     - storage/remote/otlptranslator/prometheus
   exclude-rules:
+    - linters:
+        - errcheck
+      # Taken from the default exclusions (that are otherwise disabled above).
+      text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
+    - linters:
+        - govet
+      # We use many Seek methods that do not follow the usual pattern.
+      text: "stdmethods: method Seek.* should have signature Seek"
+    - linters:
+        - revive
+      # We have stopped at some point to write doc comments on exported symbols.
+      # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now.
+      text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported
     - linters:
         - gocritic
       text: "appendAssign"
@@ -94,15 +113,14 @@ linters-settings:
     errorf: false
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
-    # So, it's needed to explicitly set in configuration all required rules.
-    # The following configuration enables all the rules from the defaults.toml
-    # https://github.com/mgechev/revive/blob/master/defaults.toml
+    # So, it's needed to explicitly enable all required rules here.
    rules:
      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
      - name: blank-imports
+     - name: comment-spacings
      - name: context-as-argument
        arguments:
-         # allow functions with test or bench signatures
+         # Allow functions with test or bench signatures.
         - allowTypesBefore: "*testing.T,testing.TB"
      - name: context-keys-type
      - name: dot-imports
@@ -118,6 +136,8 @@ linters-settings:
      - name: increment-decrement
      - name: indent-error-flow
      - name: package-comments
+       # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
+       disabled: true
      - name: range
      - name: receiver-naming
      - name: redefines-builtin-id
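
For illustration, the convention that the re-enabled doc-comment linting enforces looks like this. A minimal sketch in Go; the names are hypothetical, not from this commit:

package example

// Bad (flagged by revive once the default exclusions are off):
//
//	// Check server status - healthy & ready.
//	func CheckServerStatus() error { ... }
//
// Good (the doc comment starts with the symbol name):

// CheckServerStatus reports whether the server is healthy and ready.
// (Hypothetical signature, for illustration only.)
func CheckServerStatus() error { return nil }

Most of the changes below apply exactly this rewrite to existing comments.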

View file

@@ -471,7 +471,7 @@ func (ls lintConfig) lintDuplicateRules() bool {
 	return ls.all || ls.duplicateRules
 }
 
-// Check server status - healthy & ready.
+// CheckServerStatus - healthy & ready.
 func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
 	if serverURL.Scheme == "" {
 		serverURL.Scheme = "http"

View file

@@ -31,7 +31,7 @@ import (
 	"github.com/prometheus/prometheus/util/fmtutil"
 )
 
-// Push metrics to a prometheus remote write (for testing purpose only).
+// PushMetrics to a prometheus remote write (for testing purpose only).
 func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
 	addressURL, err := url.Parse(url.String())
 	if err != nil {

View file

@@ -13,7 +13,7 @@
 
 package discovery
 
-// Create a dummy metrics struct, because this SD doesn't have any metrics.
+// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics.
 type NoopDiscovererMetrics struct{}
 
 var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)

View file

@@ -39,7 +39,7 @@ type Discoverer interface {
 	Run(ctx context.Context, up chan<- []*targetgroup.Group)
 }
 
-// Internal metrics of service discovery mechanisms.
+// DiscovererMetrics are internal metrics of service discovery mechanisms.
 type DiscovererMetrics interface {
 	Register() error
 	Unregister()
@@ -56,7 +56,7 @@ type DiscovererOptions struct {
 	HTTPClientOptions []config.HTTPClientOption
 }
 
-// Metrics used by the "refresh" package.
+// RefreshMetrics are used by the "refresh" package.
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetrics struct {
@@ -64,17 +64,18 @@ type RefreshMetrics struct {
 	Duration prometheus.Observer
 }
 
-// Instantiate the metrics used by the "refresh" package.
+// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
 type RefreshMetricsInstantiator interface {
 	Instantiate(mech string) *RefreshMetrics
 }
 
-// An interface for registering, unregistering, and instantiating metrics for the "refresh" package.
-// Refresh metrics are registered and unregistered outside of the service discovery mechanism.
-// This is so that the same metrics can be reused across different service discovery mechanisms.
-// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are
-// specific to that SD. However, doing so would also expose too many unused metrics on
-// the Prometheus /metrics endpoint.
+// RefreshMetricsManager is an interface for registering, unregistering, and
+// instantiating metrics for the "refresh" package. Refresh metrics are
+// registered and unregistered outside of the service discovery mechanism. This
+// is so that the same metrics can be reused across different service discovery
+// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to
+// use const labels which are specific to that SD. However, doing so would also
+// expose too many unused metrics on the Prometheus /metrics endpoint.
 type RefreshMetricsManager interface {
 	DiscovererMetrics
 	RefreshMetricsInstantiator
@@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return staticDiscoverer(c), nil
 }
 
-// No metrics are needed for this service discovery mechanism.
+// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are
+// needed for this service discovery mechanism.
 func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
 	return &NoopDiscovererMetrics{}
 }

View file

@@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} {
 	return p.config
 }
 
-// Registers the metrics needed for SD mechanisms.
+// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms.
 // Does not register the metrics for the Discovery Manager.
 // TODO(ptodev): Add ability to unregister the metrics?
 func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {

View file

@@ -17,7 +17,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-// Metric vectors for the "refresh" package.
+// RefreshMetricsVecs are metric vectors for the "refresh" package.
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetricsVecs struct {

View file

@@ -19,8 +19,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-// A utility to be used by implementations of discovery.Discoverer
-// which need to manage the lifetime of their metrics.
+// MetricRegisterer is used by implementations of discovery.Discoverer that need
+// to manage the lifetime of their metrics.
 type MetricRegisterer interface {
 	RegisterMetrics() error
 	UnregisterMetrics()
@@ -34,7 +34,7 @@ type metricRegistererImpl struct {
 
 var _ MetricRegisterer = &metricRegistererImpl{}
 
-// Creates an instance of a MetricRegisterer.
+// NewMetricRegisterer creates an instance of a MetricRegisterer.
 // Typically called inside the implementation of the NewDiscoverer() method.
 func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer {
 	return &metricRegistererImpl{
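
A rough sketch of the intended lifecycle, for readers unfamiliar with this API. The SD type and metric name below are invented for illustration; real DiscovererMetrics implementations may carry more methods and collectors:

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

// exampleSDMetrics delegates the lifetime of its collectors to a
// MetricRegisterer, as described above.
type exampleSDMetrics struct {
	refreshes  prometheus.Counter
	registerer discovery.MetricRegisterer
}

// newExampleSDMetrics would typically be called from a NewDiscoverer()
// implementation.
func newExampleSDMetrics(reg prometheus.Registerer) *exampleSDMetrics {
	m := &exampleSDMetrics{
		refreshes: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "example_sd_refreshes_total",
			Help: "Number of refreshes of the example SD.",
		}),
	}
	m.registerer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{m.refreshes})
	return m
}

func (m *exampleSDMetrics) Register() error { return m.registerer.RegisterMetrics() }
func (m *exampleSDMetrics) Unregister()     { m.registerer.UnregisterMetrics() }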

View file

@@ -15,7 +15,9 @@ package exemplar
 
 import "github.com/prometheus/prometheus/model/labels"
 
-// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
+// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
+// UTF-8 characters."
 // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
 const ExemplarMaxLabelSetLength = 128
@@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool {
 	return e.Value == e2.Value
 }
 
-// Sort first by timestamp, then value, then labels.
+// Compare first timestamps, then values, then labels.
 func Compare(a, b Exemplar) int {
 	if a.Ts < b.Ts {
 		return -1
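
Both renamed symbols are easy to exercise. A sketch of checking the OpenMetrics limit and of sorting with Compare; the helper names are invented, and it assumes labels.Labels exposes a Range method:

package example

import (
	"slices"
	"unicode/utf8"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
)

// labelSetLength sums the length, in UTF-8 characters (runes, not bytes),
// of all label names and values in ls.
func labelSetLength(ls labels.Labels) int {
	n := 0
	ls.Range(func(l labels.Label) {
		n += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
	})
	return n
}

// withinOpenMetricsLimit reports whether ls respects ExemplarMaxLabelSetLength.
func withinOpenMetricsLimit(ls labels.Labels) bool {
	return labelSetLength(ls) <= exemplar.ExemplarMaxLabelSetLength
}

// sortExemplars orders exemplars by timestamp, then value, then labels,
// using the Compare function documented above.
func sortExemplars(es []exemplar.Exemplar) {
	slices.SortFunc(es, exemplar.Compare)
}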

View file

@@ -315,7 +315,8 @@ func Compare(a, b Labels) int {
 	return len(a) - len(b)
 }
 
-// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+// CopyFrom copies labels from b on top of whatever was in ls previously,
+// reusing memory or expanding if needed.
 func (ls *Labels) CopyFrom(b Labels) {
 	(*ls) = append((*ls)[:0], b...)
 }
@@ -422,7 +423,7 @@ type ScratchBuilder struct {
 	add Labels
 }
 
-// Symbol-table is no-op, just for api parity with dedupelabels.
+// SymbolTable is no-op, just for api parity with dedupelabels.
 type SymbolTable struct{}
 
 func NewSymbolTable() *SymbolTable { return nil }
@@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) {
 	b.add = append(b.add, Label{Name: name, Value: value})
 }
 
-// Add a name/value pair, using []byte instead of string.
+// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
 // The '-tags stringlabels' version of this function is unsafe, hence the name.
 // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
 func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
@@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) {
 	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
 }
 
-// Return the name/value pairs added so far as a Labels object.
+// Labels returns the name/value pairs added so far as a Labels object.
 // Note: if you want them sorted, call Sort() first.
 func (b *ScratchBuilder) Labels() Labels {
 	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
 	return append([]Label{}, b.add...)
 }
 
-// Write the newly-built Labels out to ls.
+// Overwrite the newly-built Labels out to ls.
 // Callers must ensure that there are no other references to ls, or any strings fetched from it.
 func (b *ScratchBuilder) Overwrite(ls *Labels) {
 	*ls = append((*ls)[:0], b.add...)
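
For readers new to ScratchBuilder, the renamed methods compose like this. A minimal sketch, assuming NewScratchBuilder and Sort as in the labels package:

package example

import "github.com/prometheus/prometheus/model/labels"

func buildLabels() labels.Labels {
	b := labels.NewScratchBuilder(2)
	b.Add("instance", "localhost:9100")
	b.Add("job", "node")
	b.Sort()          // Labels() does not sort for you.
	return b.Labels() // Copies the pairs, so the builder can be reused.
}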

View file

@@ -106,8 +106,8 @@ const (
 	EntryInvalid   Entry = -1
 	EntryType      Entry = 0
 	EntryHelp      Entry = 1
-	EntrySeries    Entry = 2 // A series with a simple float64 as value.
+	EntrySeries    Entry = 2 // EntrySeries marks a series with a simple float64 as value.
 	EntryComment   Entry = 3
 	EntryUnit      Entry = 4
-	EntryHistogram Entry = 5 // A series with a native histogram as a value.
+	EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value.
 )

View file

@@ -573,7 +573,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
 	return validationErr
 }
 
-// NewTestQuery: inject special behaviour into Query for testing.
+// NewTestQuery injects special behaviour into Query for testing.
 func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
 	qry := &query{
 		q: "test statement",
@@ -3531,14 +3531,14 @@ func makeInt64Pointer(val int64) *int64 {
 	return valp
 }
 
-// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
+// RatioSampler allows unit-testing (previously: Randomizer).
 type RatioSampler interface {
 	// Return this sample "offset" between [0.0, 1.0]
 	sampleOffset(ts int64, sample *Sample) float64
 	AddRatioSample(r float64, sample *Sample) bool
 }
 
-// Use Hash(labels.String()) / maxUint64 as a "deterministic"
+// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic"
 // value in [0.0, 1.0].
 type HashRatioSampler struct{}
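
The idea behind HashRatioSampler, sketched without Prometheus internals. FNV here merely stands in for whatever hash is actually used; the function names are invented:

package example

import "hash/fnv"

// sampleOffset maps a series' label string deterministically to [0.0, 1.0]
// by dividing its hash by the maximum uint64 value.
func sampleOffset(labelString string) float64 {
	h := fnv.New64a()
	h.Write([]byte(labelString)) // Write on a hash.Hash never fails.
	return float64(h.Sum64()) / float64(^uint64(0))
}

// keep reports whether a sample falls into the sampled ratio r in [0.0, 1.0].
// Because the offset is a pure function of the labels, the same series is
// always kept or always dropped for a given r.
func keep(labelString string, r float64) bool {
	return sampleOffset(labelString) < r
}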

View file

@@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
-	//nolint: errcheck
-	Walk(f, node, nil)
+	Walk(f, node, nil) //nolint:errcheck
 }
 
 // Children returns a list of all child nodes of a syntax tree node.
@@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
 	}
 }
 
-// Item implements the Node interface.
+// PositionRange implements the Node interface.
 // This makes it possible to call mergeRanges on them.
 func (i *Item) PositionRange() posrange.PositionRange {
 	return posrange.PositionRange{

View file

@@ -23,7 +23,7 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
-// Write a MetricFamily into a protobuf.
+// MetricFamilyToProtobuf writes a MetricFamily into a protobuf.
 // This function is intended for testing scraping by providing protobuf serialized input.
 func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
 	buffer := &bytes.Buffer{}
@@ -34,7 +34,7 @@ func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) {
 	return buffer.Bytes(), nil
 }
 
-// Append a MetricFamily protobuf representation to a buffer.
+// AddMetricFamilyToProtobuf appends a MetricFamily protobuf representation to a buffer.
 // This function is intended for testing scraping by providing protobuf serialized input.
 func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error {
 	protoBuf, err := proto.Marshal(metricFamily)

View file

@@ -227,9 +227,9 @@ type LabelHints struct {
 	Limit int
 }
 
-// TODO(bwplotka): Move to promql/engine_test.go?
 // QueryableFunc is an adapter to allow the use of ordinary functions as
 // Queryables. It follows the idea of http.HandlerFunc.
+// TODO(bwplotka): Move to promql/engine_test.go?
 type QueryableFunc func(mint, maxt int64) (Querier, error)
 
 // Querier calls f() with the given parameters.
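
QueryableFunc in action, following the http.HandlerFunc pattern. A sketch; NoopQuerier just stands in for a real Querier implementation:

package example

import "github.com/prometheus/prometheus/storage"

// newFixedQueryable adapts an ordinary function literal into a
// storage.Queryable, exactly like http.HandlerFunc does for handlers.
func newFixedQueryable() storage.Queryable {
	return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
		return storage.NoopQuerier(), nil
	})
}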

View file

@@ -31,13 +31,15 @@ import (
 	"github.com/google/uuid"
 )
 
+// Clouds.
 const (
-	// Clouds.
 	AzureChina      = "AzureChina"
 	AzureGovernment = "AzureGovernment"
 	AzurePublic     = "AzurePublic"
+)
 
 // Audiences.
+const (
 	IngestionChinaAudience      = "https://monitor.azure.cn//.default"
 	IngestionGovernmentAudience = "https://monitor.azure.us//.default"
 	IngestionPublicAudience     = "https://monitor.azure.com//.default"

View file

@@ -166,7 +166,7 @@ func NewTemplateExpander(
 			return html_template.HTML(text)
 		},
 		"match":     regexp.MatchString,
-		"title":     strings.Title, //nolint:staticcheck
+		"title":     strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package.
 		"toUpper":   strings.ToUpper,
 		"toLower":   strings.ToLower,
 		"graphLink": strutil.GraphLinkForExpression,

View file

@@ -191,7 +191,7 @@ func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
 // ChunkDiskMapper is for writing the Head block chunks to disk
 // and access chunks via mmapped files.
 type ChunkDiskMapper struct {
-	/// Writer.
+	// Writer.
 	dir             *os.File
 	writeBufferSize int
@@ -210,7 +210,7 @@ type ChunkDiskMapper struct {
 	crc32        hash.Hash
 	writePathMtx sync.Mutex
 
-	/// Reader.
+	// Reader.
 	// The int key in the map is the file number on the disk.
 	mmappedChunkFiles map[int]*mmappedChunkFile // Contains the m-mapped files for each chunk file mapped with its index.
 	closers           map[int]io.Closer         // Closers for resources behind the byte slices.

View file

@@ -49,7 +49,7 @@ import (
 )
 
 const (
-	// Default duration of a block in milliseconds.
+	// DefaultBlockDuration in milliseconds.
 	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
 
 	// Block dir suffixes to make deletion and creation operations atomic.

View file

@@ -201,8 +201,8 @@ func (d *Decbuf) UvarintStr() string {
 	return string(d.UvarintBytes())
 }
 
-// The return value becomes invalid if the byte slice goes away.
-// Compared to UvarintStr, this avoid allocations.
+// UvarintBytes returns invalid values if the byte slice goes away.
+// Compared to UvarintStr, it avoid allocations.
 func (d *Decbuf) UvarintBytes() []byte {
 	l := d.Uvarint64()
 	if d.E != nil {

View file

@@ -26,7 +26,7 @@ func (s *memSeries) labels() labels.Labels {
 	return s.lset
 }
 
-// No-op when not using dedupelabels.
+// RebuildSymbolTable is a no-op when not using dedupelabels.
 func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
 	return nil
 }

View file

@@ -196,8 +196,9 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
 	return toc, d.Err()
 }
 
-// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
-// It uses the given encoder to encode each postings list.
+// NewWriterWithEncoder returns a new Writer to the given filename. It
+// serializes data in format version 2. It uses the given encoder to encode each
+// postings list.
 func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncoder) (*Writer, error) {
 	dir := filepath.Dir(fn)

View file

@@ -263,8 +263,8 @@ func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chu
 	return nil, mc, err
 }
 
-// ChunkOrIterableWithCopy: implements ChunkReaderWithCopy. The special Copy behaviour
-// is only implemented for the in-order head chunk.
+// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy
+// behaviour is only implemented for the in-order head chunk.
 func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) {
 	_, _, isOOO := unpackHeadChunkRef(meta.Ref)
 	if !isOOO {

View file

@@ -58,15 +58,16 @@ type WriteTo interface {
 	StoreSeries([]record.RefSeries, int)
 	StoreMetadata([]record.RefMetadata)
 
-	// Next two methods are intended for garbage-collection: first we call
-	// UpdateSeriesSegment on all current series
+	// UpdateSeriesSegment and SeriesReset are intended for
+	// garbage-collection:
+	// First we call UpdateSeriesSegment on all current series.
 	UpdateSeriesSegment([]record.RefSeries, int)
-	// Then SeriesReset is called to allow the deletion
-	// of all series created in a segment lower than the argument.
+	// Then SeriesReset is called to allow the deletion of all series
+	// created in a segment lower than the argument.
 	SeriesReset(int)
 }
 
-// Used to notify the watcher that data has been written so that it can read.
+// WriteNotified notifies the watcher that data has been written so that it can read.
 type WriteNotified interface {
 	Notify()
 }

View file

@@ -38,8 +38,8 @@ import (
 )
 
 const (
-	DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
-	pageSize           = 32 * 1024         // 32KB
+	DefaultSegmentSize = 128 * 1024 * 1024 // DefaultSegmentSize is 128 MB.
+	pageSize           = 32 * 1024         // pageSize is 32KB.
 	recordHeaderSize   = 7
 	WblDirName         = "wbl"
 )

View file

@@ -174,7 +174,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
 	}
 }
 
-// NewInvalidQuantileWarning is used when the user specifies an invalid ratio
+// NewInvalidRatioWarning is used when the user specifies an invalid ratio
 // value, i.e. a float that is outside the range [-1, 1] or NaN.
 func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
 	return annoErr{

View file

@@ -23,13 +23,14 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 )
 
-// Replacement for require.Equal using go-cmp adapted for Prometheus data structures, instead of DeepEqual.
+// RequireEqual is a replacement for require.Equal using go-cmp adapted for
+// Prometheus data structures, instead of DeepEqual.
 func RequireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) {
 	t.Helper()
 	RequireEqualWithOptions(t, expected, actual, nil, msgAndArgs...)
 }
 
-// As RequireEqual but allows extra cmp.Options.
+// RequireEqualWithOptions works like RequireEqual but allows extra cmp.Options.
 func RequireEqualWithOptions(t testing.TB, expected, actual interface{}, extra []cmp.Option, msgAndArgs ...interface{}) {
 	t.Helper()
 	options := append([]cmp.Option{cmp.Comparer(labels.Equal)}, extra...)
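
A usage sketch for the renamed helpers, assuming the package lives at util/testutil; the extra cmp option here is just an example:

package example

import (
	"math"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestValuesEqual(t *testing.T) {
	exp := []float64{1, math.NaN()}
	act := []float64{1, math.NaN()}
	// The extra option is appended to the built-in labels.Equal comparer.
	testutil.RequireEqualWithOptions(t, exp, act,
		[]cmp.Option{cmpopts.EquateNaNs()})
}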