Merge remote-tracking branch 'origin/release-1.2'

This commit is contained in:
Fabian Reinartz 2016-11-01 16:35:22 +01:00
commit 6703404cb4
3 changed files with 44 additions and 0 deletions

View file

@ -262,6 +262,10 @@ func parse(args []string) error {
return err return err
} }
if promql.StalenessDelta < 0 {
return fmt.Errorf("negative staleness delta: %s", promql.StalenessDelta)
}
if err := parsePrometheusURL(); err != nil { if err := parsePrometheusURL(); err != nil {
return err return err
} }

View file

@ -493,6 +493,10 @@ func (bit *boundedIterator) Close() {
// QueryRange implements Storage. // QueryRange implements Storage.
func (s *MemorySeriesStorage) QueryRange(_ context.Context, from, through model.Time, matchers ...*metric.LabelMatcher) ([]SeriesIterator, error) { func (s *MemorySeriesStorage) QueryRange(_ context.Context, from, through model.Time, matchers ...*metric.LabelMatcher) ([]SeriesIterator, error) {
if through.Before(from) {
// In that case, nothing will match.
return nil, nil
}
fpSeriesPairs, err := s.seriesForLabelMatchers(from, through, matchers...) fpSeriesPairs, err := s.seriesForLabelMatchers(from, through, matchers...)
if err != nil { if err != nil {
return nil, err return nil, err
@ -507,6 +511,9 @@ func (s *MemorySeriesStorage) QueryRange(_ context.Context, from, through model.
// QueryInstant implements Storage. // QueryInstant implements Storage.
func (s *MemorySeriesStorage) QueryInstant(_ context.Context, ts model.Time, stalenessDelta time.Duration, matchers ...*metric.LabelMatcher) ([]SeriesIterator, error) { func (s *MemorySeriesStorage) QueryInstant(_ context.Context, ts model.Time, stalenessDelta time.Duration, matchers ...*metric.LabelMatcher) ([]SeriesIterator, error) {
if stalenessDelta < 0 {
panic("negative staleness delta")
}
from := ts.Add(-stalenessDelta) from := ts.Add(-stalenessDelta)
through := ts through := ts

View file

@ -504,6 +504,39 @@ func BenchmarkQueryRange(b *testing.B) {
}) })
} }
// TestQueryRangeThroughBeforeFrom verifies that QueryRange returns no
// iterators and no error when the requested range is inverted, i.e. the
// 'through' timestamp lies before 'from'. In that case nothing can match.
func TestQueryRangeThroughBeforeFrom(t *testing.T) {
	now := model.Now()
	insertStart := now.Add(-2 * time.Hour)

	s, closer := NewTestStorage(t, 2)
	defer closer.Close()

	// Stop maintenance loop to prevent actual purging.
	close(s.loopStopping)
	<-s.loopStopped
	<-s.logThrottlingStopped
	// Recreate channel to avoid panic when we really shut down.
	s.loopStopping = make(chan struct{})

	// Populate the storage with two hours of per-second samples so a
	// well-formed range query over this window would find data.
	for i := 0; i < 8192; i++ {
		s.Append(&model.Sample{
			Metric:    model.Metric{"__name__": "testmetric", "job": "test"},
			Timestamp: insertStart.Add(time.Duration(i) * time.Second),
			Value:     model.SampleValue(rand.Float64()),
		})
	}
	s.WaitForIndexing()

	// Check the matcher-construction error instead of discarding it; a bad
	// matcher would otherwise make the assertions below pass vacuously.
	lm, err := metric.NewLabelMatcher(metric.Equal, "job", "test")
	if err != nil {
		t.Fatalf("error creating label matcher: %s", err)
	}

	// from = now-30m, through = now-90m: through precedes from.
	iters, err := s.QueryRange(context.Background(), now.Add(-30*time.Minute), now.Add(-90*time.Minute), lm)
	if err != nil {
		t.Error(err)
	}
	if len(iters) != 0 {
		t.Errorf("expected no iters to be returned, got %d", len(iters))
	}
}
func TestRetentionCutoff(t *testing.T) { func TestRetentionCutoff(t *testing.T) {
now := model.Now() now := model.Now()
insertStart := now.Add(-2 * time.Hour) insertStart := now.Add(-2 * time.Hour)