Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-27 22:49:40 -08:00)
Commit d1122e0743
* Append metadata to the WAL
* Remove extra whitespace; reword some docstrings and comments
* Use RLock() for hasNewMetadata check
* Use single byte for metric type in RefMetadata
* Update proposed WAL format for single-byte type metadata
* Implement a MetadataAppender interface for the Agent
* Address first round of review comments
* Amend description of metadata in wal.md
* Correct key used to retrieve metadata from cache. When we set metadata entries in the scrapeCache, we use the p.Help(), p.Unit(), and p.Type() helpers, which retrieve the series name and use it as the cache key. When checking for cache entries, though, we used p.Series() as the key, which included the metric name _with_ its labels. That meant we were never actually hitting the cache. Fix this by using the __name__ internal label to correctly retrieve cache entries after they have been set by setHelp(), setType(), or setUnit().
* Put the feature behind a feature flag
* Fix AppendMetadata docstring
* Reorder WAL format document
* Change error message of AppendMetadata; fix access of s.meta in AppendMetadata
* Reuse temporary buffer in Metadata encoder
* Only keep the latest metadata for each refID during checkpointing
* Fix test that's referencing decoding metadata
* Avoid creating a metadata block if no new metadata are present
* Add tests for corrupt metadata block and relevant record type
* Fix CR comments
* Extract logic about changing metadata into an anonymous function
* Implement new proposed WAL format and amend relevant tests
* Use 'const' for metadata field names
* Apply metadata to head memSeries in Commit, not in AppendMetadata
* Add docstring and rename extracted helper in scrape.go
* Add tests for tsdb-related cases
* Fix linter issues vol1
* Fix linter issues vol2
* Fix Windows test by closing WAL reader files
* Use switch instead of two if statements in metadata decoding
* Fix review comments around TestMetadata* tests
* Add code for replaying WAL; test correctness of in-memory data after a replay
* Remove scrape-loop related code from PR
* Address first round of comments
* Simplify tests by sorting slices before comparison
* Fix test to use separate transactions
* Empty out buffer and record slices after encoding latest metadata
* Fix linting issue
* Update calculation for DroppedMetadata metric
* Rename MetadataAppender interface and AppendMetadata method to MetadataUpdater/UpdateMetadata
* Reuse buffer when encoding latest metadata for each series
* Fix review comments; check all returned error values using two helpers
* Simplify use of helpers
* Satisfy linter

Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
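The cache-key fix described in the commit message is easiest to see in a small example. The sketch below is not the actual scrape.go code; the metadataCache type and lookupMetadata helper are hypothetical stand-ins. It only illustrates keying metadata by the bare metric name taken from the __name__ label, rather than by the full series string, so that lookups hit the entries written by the setHelp()/setType()/setUnit()-style setters.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// metaEntry is a hypothetical stand-in for the cached metadata of one metric family.
type metaEntry struct {
	typ, unit, help string
}

// metadataCache is keyed by the bare metric name, which is what the
// setHelp()/setType()/setUnit() writers effectively use as the key.
type metadataCache map[string]metaEntry

// lookupMetadata derives the key from the series' __name__ label instead of
// the full series string (name plus labels), so reads hit the same entries
// that the writers created.
func (c metadataCache) lookupMetadata(lset labels.Labels) (metaEntry, bool) {
	e, ok := c[lset.Get("__name__")]
	return e, ok
}

func main() {
	c := metadataCache{
		"http_requests_total": {typ: "counter", help: "Total number of HTTP requests."},
	}
	// A scraped series carries labels; keying by the full label set would miss the cache.
	lset := labels.FromStrings("__name__", "http_requests_total", "code", "200")
	if e, ok := c.lookupMetadata(lset); ok {
		fmt.Println(e.typ, e.help)
	}
}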
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wal

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"testing"

	"github.com/go-kit/log"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/record"
)
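// TestLastCheckpoint verifies that LastCheckpoint returns the checkpoint
// directory with the highest numeric index and ignores directories whose
// suffix is not a number.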
func TestLastCheckpoint(t *testing.T) {
	dir := t.TempDir()

	_, _, err := LastCheckpoint(dir)
	require.Equal(t, record.ErrNotFound, err)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777))
	s, k, err := LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
	require.Equal(t, 0, k)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777))
	s, k, err = LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
	require.Equal(t, 0, k)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777))
	s, k, err = LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.1"), s)
	require.Equal(t, 1, k)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777))
	s, k, err = LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s)
	require.Equal(t, 1000, k)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
	s, k, err = LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s)
	require.Equal(t, 99999999, k)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
	s, k, err = LastCheckpoint(dir)
	require.NoError(t, err)
	require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s)
	require.Equal(t, 100000000, k)
}
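// TestDeleteCheckpoints verifies that DeleteCheckpoints removes every
// checkpoint directory with an index below the given index and keeps the rest.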
func TestDeleteCheckpoints(t *testing.T) {
	dir := t.TempDir()

	require.NoError(t, DeleteCheckpoints(dir, 0))

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777))

	require.NoError(t, DeleteCheckpoints(dir, 2))

	files, err := os.ReadDir(dir)
	require.NoError(t, err)
	fns := []string{}
	for _, f := range files {
		fns = append(fns, f.Name())
	}
	require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns)

	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
	require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777))

	require.NoError(t, DeleteCheckpoints(dir, 100000000))

	files, err = os.ReadDir(dir)
	require.NoError(t, err)
	fns = []string{}
	for _, f := range files {
		fns = append(fns, f.Name())
	}
	require.Equal(t, []string{"checkpoint.100000000", "checkpoint.100000001"}, fns)
}
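// TestCheckpoint exercises Checkpoint end to end: it fills a WAL with series,
// sample, exemplar, and metadata records, checkpoints segments 100 through 106
// while dropping odd series refs (the keep function only retains even refs) and
// samples/exemplars older than the given mint, and then verifies that the
// resulting checkpoint contains only the surviving series and the latest
// metadata entry for each of them.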
func TestCheckpoint(t *testing.T) {
	for _, compress := range []bool{false, true} {
		t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
			dir := t.TempDir()

			var enc record.Encoder
			// Create a dummy segment to bump the initial number.
			seg, err := CreateSegment(dir, 100)
			require.NoError(t, err)
			require.NoError(t, seg.Close())

			// Manually create checkpoint for 99 and earlier.
			w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
			require.NoError(t, err)

			// Add some data we expect to be around later.
			err = w.Log(enc.Series([]record.RefSeries{
				{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
				{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
			}, nil))
			require.NoError(t, err)
			// Log an unknown record that might have come from a future Prometheus version.
			require.NoError(t, w.Log([]byte{255}))
			require.NoError(t, w.Close())

			// Start a WAL and write records to it as usual.
			w, err = NewSize(nil, nil, dir, 64*1024, compress)
			require.NoError(t, err)

			var last int64
			for i := 0; ; i++ {
				_, n, err := Segments(w.Dir())
				require.NoError(t, err)
				if n >= 106 {
					break
				}
				// Write some series initially.
				if i == 0 {
					b := enc.Series([]record.RefSeries{
						{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
						{Ref: 3, Labels: labels.FromStrings("a", "b", "c", "3")},
						{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
						{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
					}, nil)
					require.NoError(t, w.Log(b))

					b = enc.Metadata([]record.RefMetadata{
						{Ref: 2, Unit: "unit", Help: "help"},
						{Ref: 3, Unit: "unit", Help: "help"},
						{Ref: 4, Unit: "unit", Help: "help"},
						{Ref: 5, Unit: "unit", Help: "help"},
					}, nil)
					require.NoError(t, w.Log(b))
				}
				// Write samples until the WAL has enough segments.
				// Make them have drifting timestamps within a record to see that they
				// get filtered properly.
				b := enc.Samples([]record.RefSample{
					{Ref: 0, T: last, V: float64(i)},
					{Ref: 1, T: last + 10000, V: float64(i)},
					{Ref: 2, T: last + 20000, V: float64(i)},
					{Ref: 3, T: last + 30000, V: float64(i)},
				}, nil)
				require.NoError(t, w.Log(b))

				b = enc.Exemplars([]record.RefExemplar{
					{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i))},
				}, nil)
				require.NoError(t, w.Log(b))

				// Write changing metadata for each series. In the end, only the latest
				// version should end up in the checkpoint.
				b = enc.Metadata([]record.RefMetadata{
					{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
				}, nil)
				require.NoError(t, w.Log(b))

				last += 100
			}
			require.NoError(t, w.Close())

			_, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
				return x%2 == 0
			}, last/2)
			require.NoError(t, err)
			require.NoError(t, w.Truncate(107))
			require.NoError(t, DeleteCheckpoints(w.Dir(), 106))

			// Only the new checkpoint should be left.
			files, err := os.ReadDir(dir)
			require.NoError(t, err)
			require.Equal(t, 1, len(files))
			require.Equal(t, "checkpoint.00000106", files[0].Name())

			sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
			require.NoError(t, err)
			defer sr.Close()

			var dec record.Decoder
			var series []record.RefSeries
			var metadata []record.RefMetadata
			r := NewReader(sr)

			for r.Next() {
				rec := r.Record()

				switch dec.Type(rec) {
				case record.Series:
					series, err = dec.Series(rec, series)
					require.NoError(t, err)
				case record.Samples:
					samples, err := dec.Samples(rec, nil)
					require.NoError(t, err)
					for _, s := range samples {
						require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
					}
				case record.Exemplars:
					exemplars, err := dec.Exemplars(rec, nil)
					require.NoError(t, err)
					for _, e := range exemplars {
						require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
					}
				case record.Metadata:
					metadata, err = dec.Metadata(rec, metadata)
					require.NoError(t, err)
				}
			}
			require.NoError(t, r.Err())

			expectedRefSeries := []record.RefSeries{
				{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
				{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
				{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
			}
			require.Equal(t, expectedRefSeries, series)

			expectedRefMetadata := []record.RefMetadata{
				{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
				{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
				{Ref: 4, Unit: "unit", Help: "help"},
			}
			sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
			require.Equal(t, expectedRefMetadata, metadata)
		})
	}
}
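// TestCheckpointNoTmpFolderAfterError checks that a Checkpoint call that fails
// on corrupt WAL data does not leave a temporary checkpoint (*.tmp) directory behind.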
func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
	// Create a new WAL with invalid data.
	dir := t.TempDir()
	w, err := NewSize(nil, nil, dir, 64*1024, false)
	require.NoError(t, err)
	var enc record.Encoder
	require.NoError(t, w.Log(enc.Series([]record.RefSeries{
		{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")},
	}, nil)))
	require.NoError(t, w.Close())

	// Corrupt data.
	f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666)
	require.NoError(t, err)
	_, err = f.WriteAt([]byte{42}, 1)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	// Run the checkpoint; since the WAL contains corrupt data, this should return an error.
	_, err = Checkpoint(log.NewNopLogger(), w, 0, 1, nil, 0)
	require.Error(t, err)

	// Walk the WAL dir to make sure no tmp folders are left behind after the error.
	err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return errors.Wrapf(err, "access err %q: %v", path, err)
		}
		if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") {
			return fmt.Errorf("wal dir contains temporary folder:%s", info.Name())
		}
		return nil
	})
	require.NoError(t, err)
}