chore(remote_write): clean up as watcher.go is part of wlog now

Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Author: machine424 (committed by Ayoub Mrini)
Date: 2024-07-08 12:15:37 +02:00
Parent: e92a18b6ce
Commit: 9e43ad2e37


@@ -20,7 +20,6 @@ import (
 	"math"
 	"os"
 	"path/filepath"
-	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -265,9 +264,9 @@ func (w *Watcher) loop() {
 // Run the watcher, which will tail the WAL until the quit channel is closed
 // or an error case is hit.
 func (w *Watcher) Run() error {
-	_, lastSegment, err := w.firstAndLast()
+	_, lastSegment, err := Segments(w.walDir)
 	if err != nil {
-		return fmt.Errorf("wal.Segments: %w", err)
+		return fmt.Errorf("Segments: %w", err)
 	}
 
 	// We want to ensure this is false across iterations since
@@ -318,57 +317,20 @@ func (w *Watcher) Run() error {
 // findSegmentForIndex finds the first segment greater than or equal to index.
 func (w *Watcher) findSegmentForIndex(index int) (int, error) {
-	refs, err := w.segments(w.walDir)
+	refs, err := listSegments(w.walDir)
 	if err != nil {
 		return -1, err
 	}
 
 	for _, r := range refs {
-		if r >= index {
-			return r, nil
+		if r.index >= index {
+			return r.index, nil
 		}
 	}
 
 	return -1, errors.New("failed to find segment for index")
 }
 
-func (w *Watcher) firstAndLast() (int, int, error) {
-	refs, err := w.segments(w.walDir)
-	if err != nil {
-		return -1, -1, err
-	}
-
-	if len(refs) == 0 {
-		return -1, -1, nil
-	}
-	return refs[0], refs[len(refs)-1], nil
-}
-
-// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL.
-// Plan is to move WAL watcher to TSDB and dedupe these implementations.
-func (w *Watcher) segments(dir string) ([]int, error) {
-	files, err := os.ReadDir(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	var refs []int
-	for _, f := range files {
-		k, err := strconv.Atoi(f.Name())
-		if err != nil {
-			continue
-		}
-		refs = append(refs, k)
-	}
-	slices.Sort(refs)
-	for i := 0; i < len(refs)-1; i++ {
-		if refs[i]+1 != refs[i+1] {
-			return nil, errors.New("segments are not sequential")
-		}
-	}
-	return refs, nil
-}
-
 func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error {
 	err := w.readSegment(r, segmentNum, tail)
@@ -447,35 +409,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 				// Currently doing a garbage collect, try again later.
 			}
-
+		// if a newer segment is produced, read the current one until the end and move on.
 		case <-segmentTicker.C:
-			_, last, err := w.firstAndLast()
+			_, last, err := Segments(w.walDir)
 			if err != nil {
-				return fmt.Errorf("segments: %w", err)
+				return fmt.Errorf("Segments: %w", err)
 			}
 
 			// Check if new segments exists.
-			if last <= segmentNum {
-				continue
+			if last > segmentNum {
+				return w.readAndHandleError(reader, segmentNum, tail, size)
 			}
-
-			err = w.readSegment(reader, segmentNum, tail)
-
-			// Ignore errors reading to end of segment whilst replaying the WAL.
-			if !tail {
-				switch {
-				case err != nil && !errors.Is(err, io.EOF):
-					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
-				case reader.Offset() != size:
-					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
-				}
-				return nil
-			}
-
-			// Otherwise, when we are tailing, non-EOFs are fatal.
-			if err != nil && !errors.Is(err, io.EOF) {
-				return err
-			}
-			return nil
+			continue
 
 		// we haven't read due to a notification in quite some time, try reading anyways
 		case <-readTicker.C:
@@ -484,7 +428,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 			if err != nil {
 				return err
 			}
-			// still want to reset the ticker so we don't read too often
+			// reset the ticker so we don't read too often
 			readTicker.Reset(readTimeout)
 
 		case <-w.readNotify:
@@ -492,7 +436,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
 			if err != nil {
 				return err
 			}
-			// still want to reset the ticker so we don't read too often
+			// reset the ticker so we don't read too often
 			readTicker.Reset(readTimeout)
 		}
 	}
@@ -731,17 +675,17 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
 	}
 
 	// Ensure we read the whole contents of every segment in the checkpoint dir.
-	segs, err := w.segments(checkpointDir)
+	segs, err := listSegments(checkpointDir)
 	if err != nil {
 		return fmt.Errorf("Unable to get segments checkpoint dir: %w", err)
 	}
-	for _, seg := range segs {
-		size, err := getSegmentSize(checkpointDir, seg)
+	for _, segRef := range segs {
+		size, err := getSegmentSize(checkpointDir, segRef.index)
 		if err != nil {
 			return fmt.Errorf("getSegmentSize: %w", err)
 		}
 
-		sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
+		sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index))
 		if err != nil {
 			return fmt.Errorf("unable to open segment: %w", err)
 		}
@@ -753,7 +697,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err
 		}
 
 		if r.Offset() != size {
-			return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
+			return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset())
 		}
 	}
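
For context, the helpers the watcher now calls (Segments, listSegments, segmentRef) live in tsdb/wlog/wlog.go, which is why the watcher-local segments/firstAndLast copies above can be dropped. A minimal sketch of what those helpers look like, assuming the usual wlog layout of one file per segment named by its numeric index; this is illustrative only, not the verbatim wlog.go implementation:

package wlog

import (
	"errors"
	"os"
	"slices"
	"strconv"
)

// segmentRef pairs a WAL segment file name with its parsed numeric index.
type segmentRef struct {
	name  string
	index int
}

// listSegments returns the segment files found in dir, sorted by index,
// and rejects directories whose segment indexes are not sequential.
func listSegments(dir string) ([]segmentRef, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var refs []segmentRef
	for _, f := range files {
		k, err := strconv.Atoi(f.Name())
		if err != nil {
			continue // skip non-segment entries such as checkpoint directories
		}
		refs = append(refs, segmentRef{name: f.Name(), index: k})
	}
	slices.SortFunc(refs, func(a, b segmentRef) int { return a.index - b.index })
	for i := 0; i < len(refs)-1; i++ {
		if refs[i].index+1 != refs[i+1].index {
			return nil, errors.New("segments are not sequential")
		}
	}
	return refs, nil
}

// Segments returns the lowest and highest segment index present in wlDir,
// or -1, -1 when the directory holds no segments.
func Segments(wlDir string) (first, last int, err error) {
	refs, err := listSegments(wlDir)
	if err != nil {
		return 0, 0, err
	}
	if len(refs) == 0 {
		return -1, -1, nil
	}
	return refs[0].index, refs[len(refs)-1].index, nil
}

With helpers like these in the same package, Run() can ask Segments(w.walDir) for the last segment, while findSegmentForIndex and readCheckpoint iterate listSegments(...) and use segmentRef.index, so the copied segments/firstAndLast methods (and the "slices" import they needed) become dead code, which is what this commit removes.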