Mirror of https://github.com/prometheus/prometheus.git
Spelling (#6517)
* spelling: alertmanager
* spelling: attributes
* spelling: autocomplete
* spelling: bootstrap
* spelling: caught
* spelling: chunkenc
* spelling: compaction
* spelling: corrupted
* spelling: deletable
* spelling: expected
* spelling: fine-grained
* spelling: initialized
* spelling: iteration
* spelling: javascript
* spelling: multiple
* spelling: number
* spelling: overlapping
* spelling: possible
* spelling: postings
* spelling: procedure
* spelling: programmatic
* spelling: queuing
* spelling: querier
* spelling: repairing
* spelling: received
* spelling: reproducible
* spelling: retention
* spelling: sample
* spelling: segements
* spelling: semantic
* spelling: software [LICENSE]
* spelling: staging
* spelling: timestamp
* spelling: unfortunately
* spelling: uvarint
* spelling: subsequently
* spelling: ressamples

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
This commit is contained in:
parent a9ed3b980d
commit 91d76c8023
@@ -468,7 +468,7 @@ This release includes multiple bugfixes and features. Further, the WAL implement
 * [FEATURE] New Service Discovery UI showing labels before and after relabelling.
 * [FEATURE] New Admin APIs added to v1 to delete, snapshot and remove tombstones.
-* [ENHANCEMENT] The graph UI autcomplete now includes your previous queries.
+* [ENHANCEMENT] The graph UI autocomplete now includes your previous queries.
 * [ENHANCEMENT] Federation is now much faster for large numbers of series.
 * [ENHANCEMENT] Added new metrics to measure rule timings.
 * [ENHANCEMENT] Rule evaluation times added to the rules UI.
@@ -587,7 +587,7 @@ https://prometheus.io/docs/prometheus/2.0/migration/
 ## 1.6.3 / 2017-05-18
 
-* [BUGFIX] Fix disappearing Alertmanger targets in Alertmanager discovery.
+* [BUGFIX] Fix disappearing Alertmanager targets in Alertmanager discovery.
 * [BUGFIX] Fix panic with remote_write on ARMv7.
 * [BUGFIX] Fix stacked graphs to adapt min/max values.
 
@@ -763,7 +763,7 @@ This is a breaking change to the Kubernetes service discovery.
 ## 1.2.1 / 2016-10-10
 
 * [BUGFIX] Count chunk evictions properly so that the server doesn't
-  assume it runs out of memory and subsequencly throttles ingestion.
+  assume it runs out of memory and subsequently throttles ingestion.
 * [BUGFIX] Use Go1.7.1 for prebuilt binaries to fix issues on MacOS Sierra.
 
 ## 1.2.0 / 2016-10-07
 Makefile | 2 +-
@@ -54,7 +54,7 @@ react-app-lint:
 .PHONY: react-app-lint-fix
 react-app-lint-fix:
-	@echo ">> running React app linting and fixing errors where possibe"
+	@echo ">> running React app linting and fixing errors where possible"
 	cd $(REACT_APP_PATH) && yarn lint
 
 .PHONY: react-app-test
@@ -181,7 +181,7 @@ const (
 	"NodeMeta": {"rack_name": "2304"},
 	"ServiceID": "test",
 	"ServiceName": "test",
-	"ServiceMeta": {"version":"1.0.0","environment":"stagging"},
+	"ServiceMeta": {"version":"1.0.0","environment":"staging"},
 	"ServiceTags": ["tag1"],
 	"ServicePort": 3341,
 	"CreateIndex": 1,
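
Aside: the fixture above is shaped like a Consul catalog service entry. A minimal, hedged sketch of decoding such an entry in Go — the struct below models only the fields shown here and is not the real Consul API type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // serviceEntry models just the fields visible in the fixture above.
    type serviceEntry struct {
        NodeMeta    map[string]string
        ServiceID   string
        ServiceName string
        ServiceMeta map[string]string
        ServiceTags []string
        ServicePort int
        CreateIndex uint64
    }

    func main() {
        data := `{
            "NodeMeta": {"rack_name": "2304"},
            "ServiceID": "test",
            "ServiceName": "test",
            "ServiceMeta": {"version":"1.0.0","environment":"staging"},
            "ServiceTags": ["tag1"],
            "ServicePort": 3341,
            "CreateIndex": 1
        }`
        var e serviceEntry
        if err := json.Unmarshal([]byte(data), &e); err != nil {
            panic(err)
        }
        fmt.Printf("%s:%d env=%s\n", e.ServiceName, e.ServicePort, e.ServiceMeta["environment"])
    }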
@@ -71,14 +71,14 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
 	tests := []struct {
 		expectedYaml string
-		expectetedErr error
+		expectedErr error
 		group Group
 	}{
 		{
 			// labels should be omitted if empty.
 			group: Group{},
 			expectedYaml: "targets: []\n",
-			expectetedErr: nil,
+			expectedErr: nil,
 		},
 		{
 			// targets only exposes addresses.
@@ -87,13 +87,13 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
 				model.LabelSet{"__address__": "localhost:9091"}},
 				Labels: model.LabelSet{"foo": "bar", "bar": "baz"}},
 			expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n bar: baz\n foo: bar\n",
-			expectetedErr: nil,
+			expectedErr: nil,
 		},
 	}
 
 	for _, test := range tests {
 		actual, err := test.group.MarshalYAML()
-		testutil.Equals(t, test.expectetedErr, err)
+		testutil.Equals(t, test.expectedErr, err)
 		testutil.Equals(t, test.expectedYaml, string(marshal(actual)))
 	}
 }
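
For readability, the escaped `expectedYaml` string in the second test case renders as the following YAML document:

    targets:
    - localhost:9090
    - localhost:9091
    labels:
     bar: baz
     foo: bar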
@@ -132,6 +132,6 @@ Internally it works like the scrape discovery manager.
 Prometheus serves its web UI and API on port `9090` by default. The web UI is available at `/` and serves a human-usable interface for running expression queries, inspecting active alerts, or getting other insight into the status of the Prometheus server.
 
-The web API is served under `/api/v1` and allows programmatical [querying, metadata, and server status inspection](https://prometheus.io/docs/prometheus/latest/querying/api/).
+The web API is served under `/api/v1` and allows programmatic [querying, metadata, and server status inspection](https://prometheus.io/docs/prometheus/latest/querying/api/).
 
 [Console templates](https://prometheus.io/docs/visualization/consoles/), which allow Prometheus to serve user-defined HTML templates that have access to TSDB data, are served under `/consoles` when console templates are present and configured.
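
As an illustration of that programmatic use, a minimal Go sketch that issues an instant query against the documented `/api/v1/query` endpoint — the server address and the example expression `up` are assumptions for demonstration:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/url"
    )

    func main() {
        // Hit the instant-query endpoint with a PromQL expression.
        q := url.Values{"query": []string{"up"}}
        resp, err := http.Get("http://localhost:9090/api/v1/query?" + q.Encode())
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body)) // JSON: {"status":"success","data":{...}}
    }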
@@ -299,7 +299,7 @@ func TestHandlerRelabel(t *testing.T) {
 	testutil.Ok(t, alertsEqual(expected, h.queue))
 }
 
-func TestHandlerQueueing(t *testing.T) {
+func TestHandlerQueuing(t *testing.T) {
 	var (
 		expectedc = make(chan []*Alert)
 		called    = make(chan struct{})
@@ -28,7 +28,7 @@ import (
 	"github.com/prometheus/prometheus/template"
 )
 
-// Error represents semantical errors on parsing rule groups.
+// Error represents semantic errors on parsing rule groups.
 type Error struct {
 	Group string
 	Rule  int
@@ -5,7 +5,7 @@
 //
 // http://www.apache.org/licenses/LICENSE-2.0
 //
-// Unless required by applicable law or agreed to in writing, softwar
+// Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
@@ -305,7 +305,7 @@ func (p *parser) Lex(lval *yySymType) int {
 // Error is expected by the yyLexer interface of the yacc generated parser.
 //
 // It is a no-op since the parsers error routines are triggered
-// by mechanisms that allow more fine grained control
+// by mechanisms that allow more fine-grained control
 // For more information, see https://godoc.org/golang.org/x/tools/cmd/goyacc.
 func (p *parser) Error(e string) {
 }
@@ -962,7 +962,7 @@ func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels.
 	case NEQ_REGEX:
 		matchType = labels.MatchNotRegexp
 	default:
-		// This should never happen, since the error should have been chaught
+		// This should never happen, since the error should have been caught
 		// by the generated parser.
 		panic("invalid operator")
 	}
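
For context, the four PromQL matcher operators (`=`, `!=`, `=~`, `!~`) map onto `labels.MatchType` values like the one set above. A hedged sketch of constructing a matcher directly, assuming the `pkg/labels` package layout of this era (Prometheus anchors regex matchers, so `node.*` must match the whole string):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/pkg/labels"
    )

    func main() {
        // !~ corresponds to labels.MatchNotRegexp.
        m, err := labels.NewMatcher(labels.MatchNotRegexp, "job", "node.*")
        if err != nil {
            panic(err)
        }
        fmt.Println(m.Matches("blackbox")) // true: "blackbox" does not match node.*
    }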
@@ -290,7 +290,7 @@ func TestPreferLocalStorageFilter(t *testing.T) {
 		}
 
 		if test.querier != q {
-			t.Errorf("%d. expected quierer %+v, got %+v", i, test.querier, q)
+			t.Errorf("%d. expected querier %+v, got %+v", i, test.querier, q)
 		}
 	}
 }
@@ -315,7 +315,7 @@ func TestRequiredMatchersFilter(t *testing.T) {
 	}
 
 	if !reflect.DeepEqual(want, have) {
-		t.Errorf("expected quierer %+v, got %+v", want, have)
+		t.Errorf("expected querier %+v, got %+v", want, have)
 	}
 }
 
@@ -5,7 +5,7 @@
 - [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
   - `DBReadOnly.Blocks()` exposes a slice of `BlockReader`s.
   - `BlockReader` interface - removed MinTime/MaxTime methods and now exposes the full block meta via `Meta()`.
-- [FEATURE] `chunckenc.Chunk.Iterator` method now takes a `chunckenc.Iterator` interface as an argument for reuse.
+- [FEATURE] `chunkenc.Chunk.Iterator` method now takes a `chunkenc.Iterator` interface as an argument for reuse.
 
 ## 0.9.1
 
@@ -67,7 +67,7 @@
 
 ## 0.5.0
 
-- [FEATURE] Time-ovelapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370)
+- [FEATURE] Time-overlapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370)
   - Disabled by default and can be enabled via `AllowOverlappingBlock` option.
   - Added `MergeChunks` function in `chunkenc/xor.go` to merge 2 time-overlapping chunks.
   - Added `MergeOverlappingChunks` function in `chunks/chunks.go` to merge multiple time-overlapping Chunk Metas.
@@ -100,7 +100,7 @@
 ## 0.3.0
 
 - [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path.
-- [CHANGE] `NewSegmentsRangeReader()` can now read over miltiple wal ranges by using the new `SegmentRange{}` struct.
+- [CHANGE] `NewSegmentsRangeReader()` can now read over multiple wal ranges by using the new `SegmentRange{}` struct.
 - [CHANGE] `CorruptionErr{}` now also exposes the Segment `Dir` which is added when displaying any errors.
 - [CHANGE] `Head.Init()` is changed to `Head.Init(minValidTime int64)`
 - [CHANGE] `SymbolTable()` renamed to `SymbolTableSize()` to make the name consistent with the `Block{ symbolTableSize uint64 }` field.
 tsdb/db.go | 18 +++++++++---------
@@ -910,34 +910,34 @@ func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
 	return deletable
 }
 
-func (db *DB) beyondTimeRetention(blocks []*Block) (deleteable map[ulid.ULID]*Block) {
+func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
 	// Time retention is disabled or no blocks to work with.
 	if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 {
 		return
 	}
 
-	deleteable = make(map[ulid.ULID]*Block)
+	deletable = make(map[ulid.ULID]*Block)
 	for i, block := range blocks {
 		// The difference between the first block and this block is larger than
-		// the retention period so any blocks after that are added as deleteable.
+		// the retention period so any blocks after that are added as deletable.
 		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > int64(db.opts.RetentionDuration) {
 			for _, b := range blocks[i:] {
-				deleteable[b.meta.ULID] = b
+				deletable[b.meta.ULID] = b
 			}
 			db.metrics.timeRetentionCount.Inc()
 			break
 		}
 	}
-	return deleteable
+	return deletable
 }
 
-func (db *DB) beyondSizeRetention(blocks []*Block) (deleteable map[ulid.ULID]*Block) {
+func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
 	// Size retention is disabled or no blocks to work with.
 	if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 {
 		return
 	}
 
-	deleteable = make(map[ulid.ULID]*Block)
+	deletable = make(map[ulid.ULID]*Block)
 
 	walSize, _ := db.Head().wal.Size()
 	// Initializing size counter with WAL size,
@@ -948,13 +948,13 @@ func (db *DB) beyondSizeRetention(blocks []*Block) (deleteable map[ulid.ULID]*Bl
 		if blocksSize > db.opts.MaxBytes {
 			// Add this and all following blocks for deletion.
 			for _, b := range blocks[i:] {
-				deleteable[b.meta.ULID] = b
+				deletable[b.meta.ULID] = b
 			}
 			db.metrics.sizeRetentionCount.Inc()
 			break
 		}
 	}
-	return deleteable
+	return deletable
 }
 
 // deleteBlocks closes and deletes blocks from the disk.
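
The selection pattern in both functions is the same: walk blocks newest-first and, once one crosses the limit, mark it and everything older. A simplified, self-contained model of the time-based variant — the `block` type and the values below are stand-ins for illustration, not the tsdb types:

    package main

    import "fmt"

    // block is a stand-in for tsdb's *Block; only MaxTime matters here.
    type block struct {
        id      string
        maxTime int64 // milliseconds
    }

    // beyondTimeRetention mirrors the loop above: blocks are ordered
    // newest-first, and once the gap to the newest block exceeds the
    // retention duration, that block and all older ones are deletable.
    func beyondTimeRetention(blocks []block, retention int64) map[string]block {
        deletable := make(map[string]block)
        for i, b := range blocks {
            if i > 0 && blocks[0].maxTime-b.maxTime > retention {
                for _, old := range blocks[i:] {
                    deletable[old.id] = old
                }
                break
            }
        }
        return deletable
    }

    func main() {
        day := int64(24 * 60 * 60 * 1000)
        blocks := []block{{"c", 15 * day}, {"b", 3 * day}, {"a", 1 * day}}
        // With 10 days of retention, "b" and "a" fall out of the window.
        fmt.Println(beyondTimeRetention(blocks, 10*day))
    }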
@@ -1176,11 +1176,11 @@ func TestSizeRetention(t *testing.T) {
 	testutil.Ok(t, err)
 	// Expected size should take into account block size + WAL size
 	expSize = blockSize + walSize
-	actRetentCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
+	actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
 	actSize, err = fileutil.DirSize(db.Dir())
 	testutil.Ok(t, err)
 
-	testutil.Equals(t, 1, actRetentCount, "metric retention count mismatch")
+	testutil.Equals(t, 1, actRetentionCount, "metric retention count mismatch")
 	testutil.Equals(t, actSize, expSize, "metric db size doesn't match actual disk size")
 	testutil.Assert(t, expSize <= sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
 	testutil.Equals(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
@@ -90,7 +90,7 @@ type Head struct {
 
 	cardinalityMutex      sync.Mutex
 	cardinalityCache      *index.PostingsStats // posting stats cache which will expire after 30sec
-	lastPostingsStatsCall time.Duration        // last posting stats call (PostgingsCardinalityStats()) time for caching
+	lastPostingsStatsCall time.Duration        // last posting stats call (PostingsCardinalityStats()) time for caching
 }
 
 type headMetrics struct {
@@ -848,7 +848,7 @@ func (h *Head) Appender() Appender {
 func (h *Head) appender() *headAppender {
 	return &headAppender{
 		head: h,
-		// Set the minimum valid time to whichever is greater the head min valid time or the compaciton window.
+		// Set the minimum valid time to whichever is greater the head min valid time or the compaction window.
 		// This ensures that no samples will be added within the compaction window to avoid races.
 		minValidTime: max(atomic.LoadInt64(&h.minValidTime), h.MaxTime()-h.chunkRange/2),
 		mint:         math.MaxInt64,
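
A quick worked example of that `minValidTime` bound, with illustrative numbers not taken from the source: with a 2h chunk range and the head's newest sample at 12:00, anything older than 11:00 is rejected:

    package main

    import "fmt"

    func max(a, b int64) int64 {
        if a > b {
            return a
        }
        return b
    }

    func main() {
        const hour = int64(60 * 60 * 1000) // milliseconds
        var (
            headMinValidTime = int64(0)  // nothing truncated yet (assumed)
            headMaxTime      = 12 * hour // newest sample at 12:00
            chunkRange       = 2 * hour
        )
        // Same expression as in appender(): whichever is greater of the
        // head min valid time and the half-chunk-range compaction window.
        minValidTime := max(headMinValidTime, headMaxTime-chunkRange/2)
        fmt.Println(minValidTime / hour) // 11 -> samples before 11:00 are too old
    }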
@@ -644,9 +644,9 @@ func TestDeleteUntilCurMax(t *testing.T) {
 	testutil.Assert(t, res.Next(), "series don't exist")
 	exps := res.At()
 	it := exps.Iterator()
-	ressmpls, err := expandSeriesIterator(it)
+	resSamples, err := expandSeriesIterator(it)
 	testutil.Ok(t, err)
-	testutil.Equals(t, []tsdbutil.Sample{sample{11, 1}}, ressmpls)
+	testutil.Equals(t, []tsdbutil.Sample{sample{11, 1}}, resSamples)
 }
 
 func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
@@ -1255,7 +1255,7 @@ func TestWalRepair_DecodingError(t *testing.T) {
 }
 
 func TestNewWalSegmentOnTruncate(t *testing.T) {
-	dir, err := ioutil.TempDir("", "test_wal_segemnts")
+	dir, err := ioutil.TempDir("", "test_wal_segements")
 	testutil.Ok(t, err)
 	defer func() {
 		testutil.Ok(t, os.RemoveAll(dir))
@@ -221,7 +221,7 @@ func TestIndexRW_Postings(t *testing.T) {
 	vals := []string{}
 	nc := d.Be32int()
 	if nc != 1 {
-		return errors.Errorf("unexpected nuumber of label indices table names %d", nc)
+		return errors.Errorf("unexpected number of label indices table names %d", nc)
 	}
 	for i := d.Be32(); i > 0; i-- {
 		v, err := ir.lookupSymbol(d.Be32())
@@ -480,7 +480,7 @@ func TestPersistence_index_e2e(t *testing.T) {
 	testutil.Ok(t, ir.Close())
 }
 
-func TestDecbufUvariantWithInvalidBuffer(t *testing.T) {
+func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
 	b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
 
 	db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable)
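
Why that buffer is invalid: in varint encoding the high bit of each byte is a continuation flag, so a run of `0x81` bytes never terminates. An illustration with the standard library (not the tsdb `encoding` package):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Every 0x81 byte has its continuation bit (0x80) set, so the
        // varint never terminates within the buffer.
        invalid := []byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}
        v, n := binary.Uvarint(invalid)
        fmt.Println(v, n) // 0 0: n == 0 means the buffer is too small / unterminated

        // A terminated two-byte varint for comparison: 0x81 0x01 == 129.
        v, n = binary.Uvarint([]byte{0x81, 0x01})
        fmt.Println(v, n) // 129 2
    }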
@@ -454,7 +454,7 @@ func (h *postingsHeap) Pop() interface{} {
 
 type mergedPostings struct {
 	h           postingsHeap
-	initilized  bool
+	initialized bool
 	cur         uint64
 	err         error
 }
@@ -485,10 +485,10 @@ func (it *mergedPostings) Next() bool {
 	}
 
 	// The user must issue an initial Next.
-	if !it.initilized {
+	if !it.initialized {
 		heap.Init(&it.h)
 		it.cur = it.h[0].At()
-		it.initilized = true
+		it.initialized = true
 		return true
 	}
 
@@ -519,7 +519,7 @@ func (it *mergedPostings) Seek(id uint64) bool {
 	if it.h.Len() == 0 || it.err != nil {
 		return false
 	}
-	if !it.initilized {
+	if !it.initialized {
 		if !it.Next() {
 			return false
 		}
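
For readers unfamiliar with the type: `mergedPostings` performs a k-way merge of sorted postings lists via `container/heap`, deferring `heap.Init` to the first `Next` call. A self-contained sketch of the same merge over plain `uint64` slices — a simplified model, not the tsdb implementation, and it initializes the heap eagerly for brevity:

    package main

    import (
        "container/heap"
        "fmt"
    )

    // list is one sorted postings list with a cursor.
    type list struct {
        vals []uint64
        pos  int
    }

    func (l *list) at() uint64 { return l.vals[l.pos] }

    // listHeap orders lists by their current value, like postingsHeap.
    type listHeap []*list

    func (h listHeap) Len() int            { return len(h) }
    func (h listHeap) Less(i, j int) bool  { return h[i].at() < h[j].at() }
    func (h listHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
    func (h *listHeap) Push(x interface{}) { *h = append(*h, x.(*list)) }
    func (h *listHeap) Pop() interface{} {
        old := *h
        x := old[len(old)-1]
        *h = old[:len(old)-1]
        return x
    }

    // merge walks all lists in ascending order, skipping duplicates.
    func merge(lists ...*list) []uint64 {
        h := make(listHeap, 0, len(lists))
        for _, l := range lists {
            if len(l.vals) > 0 {
                h = append(h, l)
            }
        }
        heap.Init(&h)

        var out []uint64
        for h.Len() > 0 {
            top := h[0]
            v := top.at()
            if len(out) == 0 || out[len(out)-1] != v {
                out = append(out, v)
            }
            top.pos++
            if top.pos >= len(top.vals) {
                heap.Pop(&h) // this list is exhausted
            } else {
                heap.Fix(&h, 0) // restore heap order after advancing
            }
        }
        return out
    }

    func main() {
        a := &list{vals: []uint64{1, 2, 5}}
        b := &list{vals: []uint64{2, 6}}
        fmt.Println(merge(a, b)) // [1 2 5 6]
    }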
@@ -193,7 +193,7 @@ func TestMultiIntersect(t *testing.T) {
 		},
 		res: []uint64{2, 5, 6, 1001},
 	},
-	// One of the reproduceable cases for:
+	// One of the reproducible cases for:
 	// https://github.com/prometheus/prometheus/issues/2616
 	// The initialisation of intersectPostings was moving the iterator forward
 	// prematurely making us miss some postings.
@@ -2135,7 +2135,7 @@ func BenchmarkQueries(b *testing.B) {
 		for _, q := range queryTypes {
 			// Can't run a check for error here as some of these will fail as
 			// queryTypes is using the same slice for the different block queriers
-			// and would have been closed in the previous iterration.
+			// and would have been closed in the previous iteration.
 			q.Close()
 		}
 	}()
@@ -75,9 +75,9 @@ func TestRecord_EncodeDecode(t *testing.T) {
 	}, decTstones)
 }
 
-// TestRecord_Corruputed ensures that corrupted records return the correct error.
+// TestRecord_Corrupted ensures that corrupted records return the correct error.
 // Bugfix check for pull/521 and pull/523.
-func TestRecord_Corruputed(t *testing.T) {
+func TestRecord_Corrupted(t *testing.T) {
 	var enc Encoder
 	var dec Decoder
 
@@ -112,7 +112,7 @@ type WALReader interface {
 // the truncation threshold can be compacted.
 type segmentFile struct {
 	*os.File
-	maxTime   int64  // highest tombstone or sample timpstamp in segment
+	maxTime   int64  // highest tombstone or sample timestamp in segment
 	minSeries uint64 // lowerst series ID in segment
 }
 
@@ -302,7 +302,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
 	err = w.Repair(corruptionErr)
 	testutil.Ok(t, err)
 
-	// Ensure that we have a completely clean slate after reapiring.
+	// Ensure that we have a completely clean slate after repairing.
 	testutil.Equals(t, w.segment.Index(), 1) // We corrupted segment 0.
 	testutil.Equals(t, w.donePages, 0)
 
@@ -379,7 +379,7 @@ func TestSegmentMetric(t *testing.T) {
 }
 
 func TestCompression(t *testing.T) {
-	boostrap := func(compressed bool) string {
+	bootstrap := func(compressed bool) string {
 		const (
 			segmentSize = pageSize
 			recordSize  = (pageSize / 2) - recordHeaderSize
@@ -401,11 +401,11 @@ func TestCompression(t *testing.T) {
 		return dirPath
 	}
 
-	dirCompressed := boostrap(true)
+	dirCompressed := bootstrap(true)
 	defer func() {
 		testutil.Ok(t, os.RemoveAll(dirCompressed))
 	}()
-	dirUnCompressed := boostrap(false)
+	dirUnCompressed := bootstrap(false)
 	defer func() {
 		testutil.Ok(t, os.RemoveAll(dirUnCompressed))
 	}()
@@ -162,7 +162,7 @@ func TestSegmentWAL_Log_Restore(t *testing.T) {
 		iterations = 5
 		stepSize   = 5
 	)
-	// Generate testing data. It does not make semantical sense but
+	// Generate testing data. It does not make semantic sense but
 	// for the purpose of this test.
 	series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
 	testutil.Ok(t, err)
@@ -373,7 +373,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			// Generate testing data. It does not make semantical sense but
+			// Generate testing data. It does not make semantic sense but
 			// for the purpose of this test.
 			dir, err := ioutil.TempDir("", "test_corrupted")
 			testutil.Ok(t, err)
@@ -422,7 +422,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 
 			// Weird hack to check order of reads.
 			i := 0
-			samplf := func(s []record.RefSample) {
+			samplef := func(s []record.RefSample) {
 				if i == 0 {
 					testutil.Equals(t, []record.RefSample{{T: 1, V: 2}}, s)
 					i++
@@ -431,7 +431,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
 				}
 			}
 
-			testutil.Ok(t, r.Read(serf, samplf, nil))
+			testutil.Ok(t, r.Read(serf, samplef, nil))
 
 			testutil.Ok(t, w2.LogSamples([]record.RefSample{{T: 99, V: 100}}))
 			testutil.Ok(t, w2.Close())
@@ -444,13 +444,13 @@ func TestWALRestoreCorrupted(t *testing.T) {
 			r = w3.Reader()
 
 			i = 0
-			testutil.Ok(t, r.Read(serf, samplf, nil))
+			testutil.Ok(t, r.Read(serf, samplef, nil))
 		})
 	}
 }
 
 func TestMigrateWAL_Empty(t *testing.T) {
-	// The migration proecedure must properly deal with a zero-length segment,
+	// The migration procedure must properly deal with a zero-length segment,
 	// which is valid in the new format.
 	dir, err := ioutil.TempDir("", "walmigrate")
 	testutil.Ok(t, err)
@@ -133,7 +133,7 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
 	return
 }
 
-// DirHash returns a hash of all files attribites and their content within a directory.
+// DirHash returns a hash of all files attributes and their content within a directory.
 func DirHash(t *testing.T, path string) []byte {
 	hash := sha256.New()
 	err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
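
A minimal standalone sketch of what such a directory hash can look like — folding each file's path, size, and content into one sha256 — using plain error returns instead of the repo's `testutil` helpers:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "os"
        "path/filepath"
    )

    // dirHash folds each entry's path, size, and file content into a
    // single sha256 digest, so any change within the tree changes the hash.
    func dirHash(root string) ([]byte, error) {
        h := sha256.New()
        err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            fmt.Fprintf(h, "%s %d\n", path, info.Size())
            if info.IsDir() {
                return nil
            }
            f, err := os.Open(path)
            if err != nil {
                return err
            }
            defer f.Close()
            _, err = io.Copy(h, f)
            return err
        })
        if err != nil {
            return nil, err
        }
        return h.Sum(nil), nil
    }

    func main() {
        sum, err := dirHash(".")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", sum)
    }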
@@ -97,7 +97,7 @@ describe('Graph', () => {
     spyState.mockReset();
     mockPlot.mockReset();
   });
-  it('should trigger state update when new data is recieved', () => {
+  it('should trigger state update when new data is received', () => {
     graph.setProps({ data: { result: [{ values: [{}], metric: {} }] } });
     expect(spyState).toHaveBeenCalledWith(
       {
@@ -58,7 +58,7 @@ export const getHoverColor = (color: string, opacity: number, stacked: boolean)
     return `rgba(${r}, ${g}, ${b}, ${opacity})`;
   }
   /*
-    Unfortunetly flot doesn't take into consideration
+    Unfortunately flot doesn't take into consideration
     the alpha value when adjusting the color on the stacked series.
     TODO: find better way to set the opacity.
   */
@@ -23,7 +23,7 @@
     </div>
 
     <!--
-    TODO: Convert this to Bootstrap navbar. This requires Javascript
+    TODO: Convert this to Bootstrap navbar. This requires JavaScript
     refresh.
     -->
     <div class="form-row">