Merge pull request #1380 from prometheus/fix-typos

Fix various typos in comments.

commit 83c5ef7c03

@@ -115,7 +115,7 @@ func (a *Analyzer) Prepare(ctx context.Context) (local.Preloader, error) {
         return nil, errors.New("analysis must be performed before preparing query")
     }
     var err error
-    // The preloader must not be closed unless an error ocurred as closing
+    // The preloader must not be closed unless an error occured as closing
     // unpins the preloaded chunks.
     p := a.Storage.NewPreloader()
     defer func() {

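The corrected comment above documents a close-on-error pattern: the preloader is created eagerly, and a deferred function closes it only if a later step sets err, because closing would unpin the chunks that a successful Prepare hands back to its caller. Below is a minimal, self-contained sketch of that deferred-cleanup idiom; the resource type and prepare function are illustrative stand-ins, not the actual Preloader API.

    package main

    import (
        "errors"
        "fmt"
    )

    // resource stands in for something that must stay open on success but
    // must be released again if a later setup step fails.
    type resource struct{ open bool }

    func (r *resource) Close() { r.open = false }

    // prepare acquires the resource up front and relies on a deferred check
    // of err to close it only when something went wrong afterwards.
    func prepare(fail bool) (*resource, error) {
        var err error
        r := &resource{open: true}
        defer func() {
            // Close only on error; on success the caller takes ownership.
            if err != nil {
                r.Close()
            }
        }()

        if fail {
            err = errors.New("later setup step failed")
            return nil, err
        }
        return r, nil
    }

    func main() {
        r, err := prepare(false)
        fmt.Println(r.open, err) // true <nil>

        _, err = prepare(true)
        fmt.Println(err) // later setup step failed
    }
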
@@ -145,7 +145,7 @@ type NumberLiteral struct {
 }
 
 // ParenExpr wraps an expression so it cannot be disassembled as a consequence
-// of operator precendence.
+// of operator precedence.
 type ParenExpr struct {
     Expr Expr
 }

@@ -868,7 +868,7 @@ func (ev *evaluator) vectorBinop(op itemType, lhs, rhs vector, matching *VectorM
             if exists {
                 ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)")
             }
-            matchedSigs[sig] = nil // Set existance to true.
+            matchedSigs[sig] = nil // Set existence to true.
         } else {
             // In many-to-one matching the grouping labels have to ensure a unique metric
             // for the result vector. Check whether those labels have already been added for

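In the hunk above, matchedSigs is a map used as a set: storing a (possibly nil) value under a label signature marks it as seen, and hitting the same signature twice means more than one series mapped to the same match group, which the engine only allows when group_left/group_right makes the many-to-one intent explicit. A small sketch of that duplicate-signature check follows, with plain uint64 values standing in for Prometheus' label signatures.

    package main

    import (
        "errors"
        "fmt"
    )

    // checkMatches reports an error as soon as a signature (a hash of the
    // matching labels) is seen twice, i.e. when matching is not one-to-one.
    func checkMatches(sigs []uint64) error {
        seen := map[uint64]struct{}{} // map used as a set
        for _, sig := range sigs {
            if _, exists := seen[sig]; exists {
                return errors.New("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)")
            }
            seen[sig] = struct{}{} // set existence to true
        }
        return nil
    }

    func main() {
        fmt.Println(checkMatches([]uint64{1, 2, 3})) // <nil>
        fmt.Println(checkMatches([]uint64{1, 2, 1})) // multiple matches for labels: ...
    }
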
@@ -915,7 +915,7 @@ func resultMetric(met metric.Metric, op itemType, labels ...model.LabelName) met
         }
         return met
     }
-    // As we definitly write, creating a new metric is the easiest solution.
+    // As we definitely write, creating a new metric is the easiest solution.
     m := model.Metric{}
     for _, ln := range labels {
         // Included labels from the `group_x` modifier are taken from the "many"-side.

@@ -48,7 +48,7 @@ func (e *ParseErr) Error() string {
     return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err)
 }
 
-// ParseStmts parses the input and returns the resulting statements or any ocurring error.
+// ParseStmts parses the input and returns the resulting statements or any occuring error.
 func ParseStmts(input string) (Statements, error) {
     p := newParser(input)
 

@@ -529,7 +529,7 @@ func (p *parser) expr() Expr {
         // Parse the next operand.
         rhs := p.unaryExpr()
 
-        // Assign the new root based on the precendence of the LHS and RHS operators.
+        // Assign the new root based on the precedence of the LHS and RHS operators.
         if lhs, ok := expr.(*BinaryExpr); ok && lhs.Op.precedence() < op.precedence() {
             expr = &BinaryExpr{
                 Op: lhs.Op,

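The corrected comment sits in the classic precedence-climbing step: after parsing another operand, the parser either makes the new operator the root of the expression tree or lets it steal the current root's right child because it binds more tightly. A toy version of that re-rooting decision follows; the node types and precedence table are illustrative, not the PromQL parser's own.

    package main

    import "fmt"

    // A toy expression tree; Num and Bin stand in for the parser's
    // NumberLiteral and BinaryExpr node types.
    type Expr interface{ String() string }

    type Num struct{ V float64 }

    func (n Num) String() string { return fmt.Sprint(n.V) }

    type Bin struct {
        Op       string
        LHS, RHS Expr
    }

    func (b Bin) String() string {
        return "(" + b.LHS.String() + " " + b.Op + " " + b.RHS.String() + ")"
    }

    func prec(op string) int {
        if op == "*" || op == "/" {
            return 2
        }
        return 1 // + and -
    }

    // combine attaches rhs to the existing tree with operator op, re-rooting
    // as in the hunk above: if the current root binds less tightly than op,
    // op takes the root's right child as its own left operand.
    func combine(expr Expr, op string, rhs Expr) Expr {
        if lhs, ok := expr.(Bin); ok && prec(lhs.Op) < prec(op) {
            lhs.RHS = Bin{Op: op, LHS: lhs.RHS, RHS: rhs}
            return lhs
        }
        return Bin{Op: op, LHS: expr, RHS: rhs}
    }

    func main() {
        // 1 + 2 * 3 parses as (1 + (2 * 3)), not ((1 + 2) * 3).
        e := combine(Num{1}, "+", Num{2})
        e = combine(e, "*", Num{3})
        fmt.Println(e) // (1 + (2 * 3))
    }
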
@@ -78,7 +78,7 @@ type ObjectMeta struct {
     Annotations map[string]string `json:"annotations,omitempty" description:"map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about objects; see http://releases.k8s.io/HEAD/docs/user-guide/annotations.md"`
 }
 
-// Protocol defines network protocols supported for things like conatiner ports.
+// Protocol defines network protocols supported for things like container ports.
 type Protocol string
 
 const (

@@ -120,7 +120,7 @@ type ServiceSpec struct {
     Ports []ServicePort `json:"ports"`
 }
 
-// ServicePort conatins information on service's port.
+// ServicePort contains information on service's port.
 type ServicePort struct {
     // The IP protocol for this port. Supports "TCP" and "UDP".
     // Default is TCP.

@@ -214,7 +214,7 @@ type chunk interface {
     // new version of the original chunk, followed by overflow chunks, if
    // any. The first chunk returned might be the same as the original one
    // or a newly allocated version. In any case, take the returned chunk as
-    // the relevant one and discard the orginal chunk.
+    // the relevant one and discard the original chunk.
     add(sample *model.SamplePair) []chunk
     clone() chunk
     firstTime() model.Time

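The fixed comment spells out the contract of add: the caller must treat the returned slice, i.e. the possibly replaced head chunk plus any overflow chunks, as authoritative and drop its reference to the original. A drastically simplified sketch of a fixed-capacity chunk and of a caller honoring that contract; the memChunk type here is invented for illustration and has nothing to do with the real chunk encodings.

    package main

    import "fmt"

    // A much-reduced stand-in for the storage chunk interface in this hunk:
    // add may return a replacement chunk plus overflow chunks, and the caller
    // must keep those and discard its reference to the original.
    type chunk interface {
        add(v float64) []chunk
        len() int
    }

    // memChunk holds at most cap samples; adding beyond that spills into a
    // freshly allocated overflow chunk.
    type memChunk struct {
        samples []float64
        cap     int
    }

    func (c *memChunk) len() int { return len(c.samples) }

    func (c *memChunk) add(v float64) []chunk {
        if len(c.samples) < c.cap {
            c.samples = append(c.samples, v)
            return []chunk{c}
        }
        overflow := &memChunk{cap: c.cap, samples: []float64{v}}
        return []chunk{c, overflow}
    }

    func main() {
        chunks := []chunk{&memChunk{cap: 2}}
        for _, v := range []float64{1, 2, 3, 4, 5} {
            head := chunks[len(chunks)-1]
            // Replace the head with whatever add returns, keeping any
            // overflow chunks and discarding the original reference.
            chunks = append(chunks[:len(chunks)-1], head.add(v)...)
        }
        for i, c := range chunks {
            fmt.Printf("chunk %d has %d samples\n", i, c.len())
        }
        // chunk 0 has 2 samples; chunk 1 has 2 samples; chunk 2 has 1 sample
    }
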
@@ -619,7 +619,7 @@ func (s *memorySeriesStorage) NeedsThrottling() bool {
         float64(atomic.LoadInt64(&numMemChunks)) > float64(s.maxMemoryChunks)*toleranceFactorMemChunks {
         select {
         case s.throttled <- struct{}{}:
-        default: // Do nothing, signal aready pending.
+        default: // Do nothing, signal already pending.
         }
         return true
     }

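The select with an empty default in this hunk is Go's non-blocking send: if a throttling signal is already sitting in the channel, the send simply does nothing instead of blocking NeedsThrottling. A minimal sketch of the pattern, assuming a buffered channel of capacity 1 as the signal slot:

    package main

    import "fmt"

    // signal performs a non-blocking send on a capacity-1 channel, the same
    // pattern as the select/default above: if a signal is already pending,
    // the default case turns the send into a no-op instead of blocking.
    func signal(ch chan struct{}) bool {
        select {
        case ch <- struct{}{}:
            return true // signal delivered
        default:
            return false // do nothing, signal already pending
        }
    }

    func main() {
        throttled := make(chan struct{}, 1)
        fmt.Println(signal(throttled)) // true: channel was empty
        fmt.Println(signal(throttled)) // false: a signal is already pending
        <-throttled                    // a consumer drains the signal
        fmt.Println(signal(throttled)) // true again
    }
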
@@ -1243,7 +1243,7 @@ func (s *memorySeriesStorage) incNumChunksToPersist(by int) {
 //
 // This method is not goroutine-safe, but it is only ever called by the single
 // goroutine that is in charge of series maintenance. According to the returned
-// score, series maintenence should be sped up. If a score of 1 is returned,
+// score, series maintenance should be sped up. If a score of 1 is returned,
 // checkpointing based on dirty-series count should be disabled, and series
 // files should not by synced anymore provided the user has specified the
 // adaptive sync strategy.

@@ -37,7 +37,7 @@ const (
     ViewDiskExtractionTime
 )
 
-// Return a string represenation of a QueryTiming identifier.
+// Return a string representation of a QueryTiming identifier.
 func (s QueryTiming) String() string {
     switch s {
     case TotalEvalTime:

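The method whose comment is fixed here is a plain switch-based Stringer for an integer identifier type. A compact sketch of the same pattern; the timing type and its constants are illustrative, not the full QueryTiming set:

    package main

    import "fmt"

    // timing is a QueryTiming-style identifier: an integer enum whose String
    // method maps each constant to a human-readable label.
    type timing int

    const (
        totalEvalTime timing = iota
        resultSortTime
    )

    // String returns a string representation of a timing identifier,
    // mirroring the switch-based Stringer in the hunk above.
    func (t timing) String() string {
        switch t {
        case totalEvalTime:
            return "Total eval time"
        case resultSortTime:
            return "Result sorting time"
        default:
            return "unknown query timing"
        }
    }

    func main() {
        fmt.Println(totalEvalTime) // Total eval time
        fmt.Println(timing(42))    // unknown query timing
    }
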