fix panics

Signed-off-by: jessicagreben <jessicagrebens@gmail.com>
jessicagreben 2020-11-01 07:54:04 -08:00
parent 61c9a89120
commit 75654715d3
4 changed files with 28 additions and 13 deletions

@@ -139,11 +139,10 @@ func main() {
 	backfillRuleCmd := backfillCmd.Command("rules", "Backfill Prometheus data for new rules.")
 	backfillRuleStart := backfillRuleCmd.Flag("start", "The time to start backfilling the new rule from. It is required. Start time should be RFC3339 or Unix timestamp.").
 		Required().String()
-	backfillRuleEnd := backfillRuleCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hrs ago. End time should be RFC3339 or Unix timestamp.").
-		Default("-3h").String()
+	backfillRuleEnd := backfillRuleCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hrs ago. End time should be RFC3339 or Unix timestamp.").String()
 	backfillOutputDir := backfillRuleCmd.Flag("output dir", "The filepath on the local filesystem to write the output to. Output will be blocks containing the data of the backfilled recording rules.").Default("backfilldata/").String()
-	backfillRuleURL := backfillRuleCmd.Flag("url", "Prometheus API url with the data where the rule will be backfilled from.").Default("localhost:9090").String()
-	backfillRuleEvalInterval := backfillRuleCmd.Flag("evaluation_interval default", "How frequently to evaluate rules when backfilling. evaluation interval in the rules file will take precedence.").
+	backfillRuleURL := backfillRuleCmd.Flag("url", "Prometheus API url with the data where the rule will be backfilled from.").Default("http://localhost:9090").String()
+	backfillRuleEvalInterval := backfillRuleCmd.Flag("evaluation_interval_default", "How frequently to evaluate rules when backfilling if a value is not set in the rules file.").
 		Default("60s").Duration()
 	backfillRuleFiles := backfillRuleCmd.Arg(
 		"rule-files",
@@ -787,11 +786,29 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 // at the outputDir location.
 func BackfillRule(url, start, end, outputDir string, evalInterval time.Duration, files ...string) error {
 	ctx := context.Background()
-	stime, etime, err := parseStartTimeAndEndTime(start, end)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		return err
-	}
+	var stime, etime time.Time
+	var err error
+	if end == "" {
+		etime = time.Now().Add(-3 * time.Hour)
+	} else {
+		etime, err = parseTime(end)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "error parsing end time:", err)
+			return err
+		}
+	}
+	stime, err = parseTime(start)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "error parsing start time:", err)
+		return err
+	}
+	if !stime.Before(etime) {
+		fmt.Fprintln(os.Stderr, "start time is not before end time")
+		return nil
+	}
 	cfg := ruleImporterConfig{
 		Start: stime,
 		End:   etime,
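
parseTime itself is not shown in this diff; going by the flag help text, it is assumed here to accept either a Unix timestamp or an RFC3339 string. A standalone sketch of a parser along those lines (the name parseTimeSketch is made up; this is not the promtool implementation):

    package main

    import (
        "fmt"
        "math"
        "strconv"
        "time"
    )

    // parseTimeSketch accepts a Unix timestamp (optionally fractional) or an
    // RFC3339 string, mirroring what the --start/--end help text describes.
    func parseTimeSketch(s string) (time.Time, error) {
        if v, err := strconv.ParseFloat(s, 64); err == nil {
            sec, frac := math.Modf(v)
            return time.Unix(int64(sec), int64(frac*float64(time.Second))).UTC(), nil
        }
        if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
            return t, nil
        }
        return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
    }

    func main() {
        for _, in := range []string{"1604246044", "2020-11-01T07:54:04-08:00"} {
            t, err := parseTimeSketch(in)
            fmt.Println(in, "=>", t, err)
        }
    }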

@@ -121,6 +121,7 @@ func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string
 				File:     filename,
 				Interval: itv,
 				Rules:    rgRules,
+				Opts:     &rules.ManagerOptions{},
 			})
 		}
 	}
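
This one-line addition is the likely source of the panics named in the commit title: rules.NewGroup dereferences GroupOptions.Opts (for its metrics, registerer and logger), so leaving Opts nil panics as soon as a group is constructed, and an empty ManagerOptions is enough for the importer. A standalone sketch of the corrected construction (the group name and file below are made up):

    package main

    import (
        "time"

        "github.com/prometheus/prometheus/rules"
    )

    func main() {
        // With Opts left nil, NewGroup would hit a nil pointer dereference;
        // an empty (but non-nil) ManagerOptions avoids that.
        g := rules.NewGroup(rules.GroupOptions{
            Name:     "example",           // made-up group name
            File:     "example.rules.yml", // made-up file name
            Interval: time.Minute,
            Opts:     &rules.ManagerOptions{},
        })
        _ = g
    }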

@@ -474,8 +474,8 @@ func (g *Group) GetEvaluationTime() time.Duration {
 	return g.evaluationTime
 }
 
-// setEvaluationDuration sets the time in seconds the last evaluation took.
-func (g *Group) setEvaluationDuration(dur time.Duration) {
+// setEvaluationTime sets the time in seconds the last evaluation took.
+func (g *Group) setEvaluationTime(dur time.Duration) {
 	g.metrics.groupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())
 
 	g.mtx.Lock()
@@ -490,8 +490,8 @@ func (g *Group) GetLastEvaluation() time.Time {
 	return g.lastEvaluation
 }
 
-// setEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
-func (g *Group) setEvaluationTimestamp(ts time.Time) {
+// setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
+func (g *Group) setLastEvaluation(ts time.Time) {
 	g.metrics.groupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
 
 	g.mtx.Lock()

@ -24,7 +24,6 @@ import (
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
@ -85,7 +84,6 @@ func (w *BlockWriter) Appender(ctx context.Context) storage.Appender {
// Flush implements the Writer interface. This is where actual block writing
// happens. After flush completes, no writes can be done.
func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
seriesCount := w.head.NumSeries()
if w.head.NumSeries() == 0 {
return ulid.ULID{}, errors.New("no series appended, aborting")
}
@ -94,7 +92,6 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
// Because of this block intervals are always +1 than the total samples it includes.
maxt := w.head.MaxTime() + 1
level.Info(w.logger).Log("msg", "flushing", "series_count", seriesCount, "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt))
compactor, err := NewLeveledCompactor(ctx,
nil,
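
The unchanged comment above the removed log line explains the +1: block intervals are half-open, [b.MinTime, b.MaxTime), so maxt has to be one millisecond past the last sample for that sample to be covered. A tiny illustration with made-up millisecond timestamps:

    package main

    import "fmt"

    func main() {
        // Hypothetical head min/max timestamps in milliseconds.
        mint, headMax := int64(1000), int64(2000)
        maxt := headMax + 1 // half-open [mint, maxt): +1 so the sample at headMax is included

        covered := func(ts int64) bool { return ts >= mint && ts < maxt }
        fmt.Println(covered(headMax))     // true
        fmt.Println(covered(headMax + 1)) // false
    }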