Mirror of https://github.com/prometheus/prometheus.git, synced 2025-03-05 20:59:13 -08:00
fix the wrong word (#6069)

Signed-off-by: chentanjun <2799194073@qq.com>

parent ea2c836e1b
commit 103f26d188
@@ -24,7 +24,7 @@ jobs:
     - run:
         command: make
         environment:
-          # Run garbage collection more aggresively to avoid getting OOMed during the lint phase.
+          # Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
           GOGC: "20"
           # By default Go uses GOMAXPROCS but a Circle CI executor has many
           # cores (> 30) while the CPU and RAM resources are throttled. If we

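The GOGC: "20" setting below the fixed comment makes the Go garbage collector start a cycle once the heap grows only 20% over the live set (the default is 100), trading lint-phase CPU for lower peak memory. A minimal sketch of the same knob set at runtime, using nothing beyond the standard library:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Equivalent of GOGC=20 from the CI config: collect once the heap
	// grows 20% over the live set, trading CPU for a smaller peak RSS.
	old := debug.SetGCPercent(20)
	fmt.Printf("GC percent changed from %d to 20\n", old)
}
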
@@ -77,7 +77,7 @@ type recoverableError struct {
 func (c *Client) Store(ctx context.Context, req []byte) error {
     httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
     if err != nil {
-        // Errors from NewRequest are from unparseable URLs, so are not
+        // Errors from NewRequest are from unparsable URLs, so are not
         // recoverable.
         return err
     }

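The comment being fixed describes a real property of net/http: NewRequest fails before any network I/O when the URL cannot be parsed, so retrying the same input cannot succeed. A small sketch, using an invalid percent-escape as the unparsable URL:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// "%zz" is an invalid URL escape, so url.Parse inside NewRequest
	// fails immediately; no request is ever sent, and a retry would
	// fail the same way, hence "not recoverable".
	_, err := http.NewRequest("POST", "http://example.com/%zz", strings.NewReader("payload"))
	fmt.Println(err)
}
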
@@ -122,7 +122,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
         }

         // Use RemoteWriteConfigs and its index to get hash. So if its index changed,
-        // the correspoinding queue should also be restarted.
+        // the corresponding queue should also be restarted.
         hash := md5.Sum(b)
         if i < len(rws.queues) && rws.hashes[i] == hash && externalLabelUnchanged {
             // The RemoteWriteConfig and index both not changed, keep the queue.

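The surrounding logic hashes each serialized remote-write config together with its index and keeps the existing queue only when the hash is unchanged. A toy sketch of that check, with the serialized bytes made up for illustration:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// Stand-in for a marshalled RemoteWriteConfig plus its index.
	b := []byte(`0|url: http://localhost:9090/api/v1/write`)

	hash := md5.Sum(b)     // md5.Sum returns a comparable [16]byte
	previous := md5.Sum(b) // pretend this was stored on the last apply

	if hash == previous {
		fmt.Println("config and index unchanged, keep the queue")
	} else {
		fmt.Println("config or index changed, restart the queue")
	}
}
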
@@ -275,7 +275,7 @@ type Block struct {
     meta BlockMeta

     // Symbol Table Size in bytes.
-    // We maintain this variable to avoid recalculation everytime.
+    // We maintain this variable to avoid recalculation every time.
     symbolTableSize uint64

     chunkr ChunkReader

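symbolTableSize is a classic compute-once cache: the size is derived when the block is opened and then served from the field. A toy sketch of the pattern, with a simplified block type assumed for illustration:

package main

import "fmt"

// block caches the total size of its symbols so callers don't pay for
// a recalculation every time; a simplified stand-in for tsdb's Block.
type block struct {
	symbols         []string
	symbolTableSize uint64
}

func newBlock(symbols []string) *block {
	var size uint64
	for _, s := range symbols {
		size += uint64(len(s)) // computed once, at construction
	}
	return &block{symbols: symbols, symbolTableSize: size}
}

func main() {
	b := newBlock([]string{"__name__", "up", "instance"})
	fmt.Println(b.symbolTableSize) // served from the cached field
}
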
@@ -936,7 +936,7 @@ func TestCancelCompactions(t *testing.T) {
         testutil.Ok(t, os.RemoveAll(tmpdirCopy))
     }()

-    // Measure the compaction time without interupting it.
+    // Measure the compaction time without interrupting it.
     var timeCompactionUninterrupted time.Duration
     {
         db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})

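The test first times an uninterrupted compaction so a later, cancelled one has a baseline to compare against. A minimal sketch of that measurement pattern, with a sleep standing in for the compaction:

package main

import (
	"fmt"
	"time"
)

func main() {
	compact := func() { time.Sleep(10 * time.Millisecond) } // stand-in workload

	start := time.Now()
	compact()
	timeCompactionUninterrupted := time.Since(start)

	fmt.Println("uninterrupted compaction took", timeCompactionUninterrupted)
}
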
@@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
 var ErrClosed = errors.New("db already closed")

 // DBReadOnly provides APIs for read only operations on a database.
-// Current implementation doesn't support concurency so
+// Current implementation doesn't support concurrency so
 // all API calls should happen in the same go routine.
 type DBReadOnly struct {
     logger log.Logger

@@ -272,7 +272,7 @@ type DBReadOnly struct {
 // OpenDBReadOnly opens DB in the given directory for read only operations.
 func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
     if _, err := os.Stat(dir); err != nil {
-        return nil, errors.Wrap(err, "openning the db dir")
+        return nil, errors.Wrap(err, "opening the db dir")
     }

     if l == nil {

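errors.Wrap here is github.com/pkg/errors, which the code of this era used: the message names the failing operation while the original os.Stat error remains available as the cause. A short sketch of the same pattern:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func checkDir(dir string) error {
	if _, err := os.Stat(dir); err != nil {
		// Annotate with the operation; the *os.PathError stays wrapped inside.
		return errors.Wrap(err, "opening the db dir")
	}
	return nil
}

func main() {
	err := checkDir("/does/not/exist")
	fmt.Println(err)               // opening the db dir: stat /does/not/exist: ...
	fmt.Println(errors.Cause(err)) // the underlying *os.PathError
}
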
@@ -359,7 +359,7 @@ func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) {
         maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
     }

-    // Also add the WAL if the current blocks don't cover the requestes time range.
+    // Also add the WAL if the current blocks don't cover the requests time range.
     if maxBlockTime <= maxt {
         w, err := wal.Open(db.logger, nil, filepath.Join(db.dir, "wal"))
         if err != nil {

@@ -13,7 +13,7 @@

 // +build go1.12

-// Package goversion enforces the go version suported by the tsdb module.
+// Package goversion enforces the go version supported by the tsdb module.
 package goversion

 const _SoftwareRequiresGOVERSION1_12 = uint8(0)

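The package is a compile-time version gate: this file carries a // +build go1.12 constraint, so the constant only exists when built with Go 1.12 or newer, and a file without the tag that references it breaks the build on older toolchains. A sketch of that two-file layout, assumed for illustration rather than copied from the repo:

// goversion.go — compiled only on Go 1.12+ because of the build tag,
// so it is the sole definition site of the constant.

// +build go1.12

package goversion

const _SoftwareRequiresGOVERSION1_12 = uint8(0)

// A second file with no build tag (sketched in this comment, since one
// block holds one file) references the constant unconditionally:
//
//	package goversion
//
//	var _ = _SoftwareRequiresGOVERSION1_12
//
// An older toolchain skips goversion.go, leaving the identifier
// undefined, so the build fails with a clear compile error.
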
@@ -149,7 +149,7 @@ func TestHead_ReadWAL(t *testing.T) {
     s100 := head.series.getByID(100)

     testutil.Equals(t, labels.FromStrings("a", "1"), s10.lset)
-    testutil.Equals(t, (*memSeries)(nil), s11) // Series without samples should be garbage colected at head.Init().
+    testutil.Equals(t, (*memSeries)(nil), s11) // Series without samples should be garbage collected at head.Init().
     testutil.Equals(t, labels.FromStrings("a", "4"), s50.lset)
     testutil.Equals(t, labels.FromStrings("a", "3"), s100.lset)

@@ -906,7 +906,7 @@ func (s *chainedSeries) Iterator() SeriesIterator {
     return newChainedSeriesIterator(s.series...)
 }

-// chainedSeriesIterator implements a series iterater over a list
+// chainedSeriesIterator implements a series iterator over a list
 // of time-sorted, non-overlapping iterators.
 type chainedSeriesIterator struct {
     series []Series // series in time order

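chainedSeriesIterator walks a list of non-overlapping, time-ordered iterators, advancing to the next one whenever the current is exhausted. A self-contained sketch over plain int64 timestamps, with a simplified interface standing in for the real SeriesIterator:

package main

import "fmt"

type iterator interface {
	Next() bool
	At() int64
}

type sliceIterator struct {
	ts []int64
	i  int
}

func (s *sliceIterator) Next() bool { s.i++; return s.i <= len(s.ts) }
func (s *sliceIterator) At() int64  { return s.ts[s.i-1] }

// chainedIterator concatenates non-overlapping, time-sorted iterators.
type chainedIterator struct {
	iters []iterator
	cur   int
}

func (c *chainedIterator) Next() bool {
	for c.cur < len(c.iters) {
		if c.iters[c.cur].Next() {
			return true
		}
		c.cur++ // current iterator exhausted, move to the next one
	}
	return false
}

func (c *chainedIterator) At() int64 { return c.iters[c.cur].At() }

func main() {
	it := &chainedIterator{iters: []iterator{
		&sliceIterator{ts: []int64{1, 2}},
		&sliceIterator{ts: []int64{3, 4}},
	}}
	for it.Next() {
		fmt.Println(it.At()) // 1 2 3 4
	}
}
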
@@ -977,7 +977,7 @@ func (s *verticalChainedSeries) Iterator() SeriesIterator {
     return newVerticalMergeSeriesIterator(s.series...)
 }

-// verticalMergeSeriesIterator implements a series iterater over a list
+// verticalMergeSeriesIterator implements a series iterator over a list
 // of time-sorted, time-overlapping iterators.
 type verticalMergeSeriesIterator struct {
     a, b SeriesIterator

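verticalMergeSeriesIterator differs from the chained one in that its inputs may overlap in time, so it must merge by timestamp rather than concatenate. A compact sketch of that two-way merge over sorted timestamp slices, collapsing equal timestamps as overlapping samples:

package main

import "fmt"

func mergeTimestamps(a, b []int64) []int64 {
	out := make([]int64, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0] < b[0]:
			out = append(out, a[0])
			a = a[1:]
		case b[0] < a[0]:
			out = append(out, b[0])
			b = b[1:]
		default:
			// Equal timestamps: the series overlap here, emit one copy.
			out = append(out, a[0])
			a, b = a[1:], b[1:]
		}
	}
	out = append(out, a...)
	return append(out, b...)
}

func main() {
	fmt.Println(mergeTimestamps([]int64{1, 3, 5}, []int64{2, 3, 6}))
	// Output: [1 2 3 5 6]
}
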
@@ -915,7 +915,7 @@ func (r *walReader) Read(
     et, flag, b := r.at()

     // In decoding below we never return a walCorruptionErr for now.
-    // Those should generally be catched by entry decoding before.
+    // Those should generally be caught by entry decoding before.
     switch et {
     case WALEntrySeries:
         var series []record.RefSeries

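The switch dispatches on the entry type tag read from the WAL record; malformed payloads are expected to surface earlier, during entry decoding. A stripped-down sketch with made-up entry types standing in for WALEntrySeries and friends:

package main

import "fmt"

type entryType uint8

const (
	entrySeries entryType = iota
	entrySamples
)

func handle(et entryType, b []byte) error {
	switch et {
	case entrySeries:
		fmt.Println("decode series record,", len(b), "bytes")
	case entrySamples:
		fmt.Println("decode samples record,", len(b), "bytes")
	default:
		// Corrupt tags should generally be caught by entry decoding
		// before we get here, so this is a last-resort error.
		return fmt.Errorf("unknown WAL entry type %d", et)
	}
	return nil
}

func main() {
	_ = handle(entrySeries, []byte{1, 2, 3})
	_ = handle(entrySamples, []byte{4, 5})
}
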
@@ -46,7 +46,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
             8,
         },
         // Ensures that the page buffer is big enough to fit
-        // an entire page size without panicing.
+        // an entire page size without panicking.
         // https://github.com/prometheus/prometheus/tsdb/pull/414
         "bad_header": {
             1,

@@ -401,7 +401,7 @@ func TestWALRestoreCorrupted(t *testing.T) {

     // cut() truncates and fsyncs the first segment async. If it happens after
     // the corruption we apply below, the corruption will be overwritten again.
-    // Fire and forget a sync to avoid flakyness.
+    // Fire and forget a sync to avoid flakiness.
     w.files[0].Sync()
     // Corrupt the second entry in the first file.
     // After re-opening we must be able to read the first entry

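The fire-and-forget Sync exists because cut() fsyncs the old segment asynchronously; forcing a sync first ensures the corruption written afterwards isn't undone by a late write-back. A minimal sketch of syncing a file before mutating it in place, standard library only:

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "wal-segment-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write([]byte("entry-1")); err != nil {
		log.Fatal(err)
	}

	// Flush to stable storage first; the error is deliberately ignored,
	// mirroring the fire-and-forget call in the test.
	_ = f.Sync()

	// Only now is it safe to corrupt bytes in place for the test.
	if _, err := f.WriteAt([]byte{0x00}, 0); err != nil {
		log.Fatal(err)
	}
}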