Merge pull request #5503 from prometheus/release292

Cut 2.9.2
Brian Brazil 2019-04-24 16:30:33 +01:00 committed by GitHub
commit d3245f1502
22 changed files with 509 additions and 31 deletions

View file

@ -1,3 +1,11 @@
## 2.9.2 / 2019-04-24
* [BUGFIX] Make sure subquery range is taken into account for selection #5467
* [BUGFIX] Exhaust every request body before closing it #5166
* [BUGFIX] Cmd/promtool: return errors from rule evaluations #5483
* [BUGFIX] Remote Storage: string interner should not panic in release #5487
* [BUGFIX] Fix memory allocation regression in mergedPostings.Seek tsdb#586
## 2.9.1 / 2019-04-16
* [BUGFIX] Discovery/kubernetes: fix missing label sanitization #5462

View file

@ -1 +1 @@
2.9.1
2.9.2

View file

@ -210,6 +210,12 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
}
for _, g := range groups {
g.Eval(suite.Context(), ts)
for _, r := range g.Rules() {
if r.LastError() != nil {
errs = append(errs, errors.Errorf(" rule: %s, time: %s, err: %v",
r.Name(), ts.Sub(time.Unix(0, 0)), r.LastError()))
}
}
}
})
if len(errs) > 0 {

View file

@ -17,6 +17,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
@ -306,7 +307,10 @@ func fetchApps(ctx context.Context, client *http.Client, url string) (*appList,
if err != nil {
return nil, err
}
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if (resp.StatusCode < 200) || (resp.StatusCode >= 300) {
return nil, errors.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode)
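
The same drain-before-close pattern recurs throughout this commit (Consul SD example, OpenTSDB client, notifier, scrape, remote client). Draining the body before closing it lets the HTTP transport return the keep-alive connection to its idle pool instead of discarding it. A minimal, hypothetical helper illustrating the idiom (not part of the diff; the name drainAndClose is made up):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// drainAndClose reads any remaining bytes from an HTTP response body and then
// closes it, so the underlying connection can be reused by the transport.
func drainAndClose(body io.ReadCloser) {
	io.Copy(ioutil.Discard, body)
	body.Close()
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer drainAndClose(resp.Body)
	fmt.Println(resp.Status)
}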

View file

@ -17,6 +17,8 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
@ -98,7 +100,10 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
}
dec := json.NewDecoder(resp.Body)
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
err := dec.Decode(&nodes)
if err != nil {

View file

@ -17,6 +17,7 @@ import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"math"
"net/http"
@ -114,7 +115,10 @@ func (c *Client) Write(samples model.Samples) error {
if err != nil {
return err
}
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
// API returns status code 204 for successful writes.
// http://opentsdb.net/docs/build/html/api_http/put.html

go.mod (2 lines changed)
View file

@ -79,7 +79,7 @@ require (
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f
github.com/prometheus/common v0.3.0
github.com/prometheus/tsdb v0.7.0
github.com/prometheus/tsdb v0.7.1
github.com/rlmcpherson/s3gof3r v0.5.0 // indirect
github.com/rubyist/circuitbreaker v2.2.1+incompatible // indirect
github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13

go.sum (4 lines changed)
View file

@ -277,8 +277,8 @@ github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/tsdb v0.7.0 h1:2rQ5LUp0GGnSOMyZTEnEE82YDAAcQ2d4M2nGeUBH5tg=
github.com/prometheus/tsdb v0.7.0/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rlmcpherson/s3gof3r v0.5.0 h1:1izOJpTiohSibfOHuNyEA/yQnAirh05enzEdmhez43k=

View file

@ -19,6 +19,8 @@ import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -508,7 +510,10 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
if err != nil {
return err
}
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
// Any HTTP status 2xx is OK.
if resp.StatusCode/100 != 2 {

View file

@ -14,6 +14,7 @@
package notifier
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
@ -224,7 +225,7 @@ func TestCustomDo(t *testing.T) {
testutil.Equals(t, testURL, req.URL.String())
return &http.Response{
Body: ioutil.NopCloser(nil),
Body: ioutil.NopCloser(bytes.NewBuffer(nil)),
}, nil
},
}, nil)
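
The test change above is a consequence of the drain-before-close fix: sendOne now reads the response body, and ioutil.NopCloser(nil) panics on the first Read because the wrapped io.Reader is nil. A small standalone sketch (illustrative only, not taken from the diff) of why the empty-buffer variant is safe:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// A valid, empty body: reading it yields 0 bytes and no error.
	body := ioutil.NopCloser(bytes.NewBuffer(nil))
	n, err := io.Copy(ioutil.Discard, body)
	body.Close()
	fmt.Println(n, err) // 0 <nil>
}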

View file

@ -552,6 +552,14 @@ func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *Ev
Step: durationToInt64Millis(s.Interval),
}
// We need to make sure we select the timerange selected by the subquery.
// TODO(gouthamve): cumulativeSubqueryOffset gives the sum of range and the offset
// we can optimise it by separating out the range and offsets, and subtracting the offsets
// from end also.
subqOffset := ng.cumulativeSubqueryOffset(path)
offsetMilliseconds := durationMilliseconds(subqOffset)
params.Start = params.Start - offsetMilliseconds
switch n := node.(type) {
case *VectorSelector:
params.Start = params.Start - durationMilliseconds(LookbackDelta)
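
To make the new offset handling concrete, here is the arithmetic for one of the test cases added below; the numbers mirror the comments in TestParamsSetCorrectly, while the split into individual terms is an interpretation of the TODO above. It covers the instant query count_over_time((foo offset 10s)[2m:1s] offset 10s) at t = 300s, with LookbackDelta set to 5s by the test:

package main

import "fmt"

func main() {
	start := int64(300000) // query time in ms
	start -= 120000        // subquery range (2m), included in cumulativeSubqueryOffset
	start -= 10000         // subquery offset, also included in cumulativeSubqueryOffset
	start -= 5000          // LookbackDelta applied to the inner vector selector
	start -= 10000         // the inner selector's own offset

	end := int64(300000)
	end -= 10000 // only the inner selector's offset shifts the end

	fmt.Println(start, end) // 155000 290000, i.e. the test's paramStart=155s, paramEnd=290s
}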

View file

@ -227,6 +227,166 @@ func TestQueryError(t *testing.T) {
}
}
// paramCheckerQuerier implements storage.Querier which checks the start and end times
// in params.
type paramCheckerQuerier struct {
start int64
end int64
t *testing.T
}
func (q *paramCheckerQuerier) Select(sp *storage.SelectParams, _ ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
testutil.Equals(q.t, q.start, sp.Start)
testutil.Equals(q.t, q.end, sp.End)
return errSeriesSet{err: nil}, nil, nil
}
func (*paramCheckerQuerier) LabelValues(name string) ([]string, error) { return nil, nil }
func (*paramCheckerQuerier) LabelNames() ([]string, error) { return nil, nil }
func (*paramCheckerQuerier) Close() error { return nil }
func TestParamsSetCorrectly(t *testing.T) {
opts := EngineOpts{
Logger: nil,
Reg: nil,
MaxConcurrent: 10,
MaxSamples: 10,
Timeout: 10 * time.Second,
}
// Set the lookback to be smaller and reset at the end.
currLookback := LookbackDelta
LookbackDelta = 5 * time.Second
defer func() {
LookbackDelta = currLookback
}()
cases := []struct {
query string
// All times are in seconds.
start int64
end int64
paramStart int64
paramEnd int64
}{{
query: "foo",
start: 10,
paramStart: 5,
paramEnd: 10,
}, {
query: "foo[2m]",
start: 200,
paramStart: 80, // 200 - 120
paramEnd: 200,
}, {
query: "foo[2m] offset 2m",
start: 300,
paramStart: 60,
paramEnd: 180,
}, {
query: "foo[2m:1s]",
start: 300,
paramStart: 175, // 300 - 120 - 5
paramEnd: 300,
}, {
query: "count_over_time(foo[2m:1s])",
start: 300,
paramStart: 175, // 300 - 120 - 5
paramEnd: 300,
}, {
query: "count_over_time(foo[2m:1s] offset 10s)",
start: 300,
paramStart: 165, // 300 - 120 - 5 - 10
paramEnd: 300,
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)",
start: 300,
paramStart: 155, // 300 - 120 - 5 - 10 - 10
paramEnd: 290,
}, {
// Range queries now.
query: "foo",
start: 10,
end: 20,
paramStart: 5,
paramEnd: 20,
}, {
query: "rate(foo[2m])",
start: 200,
end: 500,
paramStart: 80, // 200 - 120
paramEnd: 500,
}, {
query: "rate(foo[2m] offset 2m)",
start: 300,
end: 500,
paramStart: 60,
paramEnd: 380,
}, {
query: "rate(foo[2m:1s])",
start: 300,
end: 500,
paramStart: 175, // 300 - 120 - 5
paramEnd: 500,
}, {
query: "count_over_time(foo[2m:1s])",
start: 300,
end: 500,
paramStart: 175, // 300 - 120 - 5
paramEnd: 500,
}, {
query: "count_over_time(foo[2m:1s] offset 10s)",
start: 300,
end: 500,
paramStart: 165, // 300 - 120 - 5 - 10
paramEnd: 500,
}, {
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)",
start: 300,
end: 500,
paramStart: 155, // 300 - 120 - 5 - 10 - 10
paramEnd: 490,
}}
for _, tc := range cases {
engine := NewEngine(opts)
queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
return &paramCheckerQuerier{start: tc.paramStart * 1000, end: tc.paramEnd * 1000, t: t}, nil
})
var (
query Query
err error
)
if tc.end == 0 {
query, err = engine.NewInstantQuery(queryable, tc.query, time.Unix(tc.start, 0))
} else {
query, err = engine.NewRangeQuery(queryable, tc.query, time.Unix(tc.start, 0), time.Unix(tc.end, 0), time.Second)
}
testutil.Ok(t, err)
res := query.Exec(context.Background())
testutil.Ok(t, res.Err)
}
}
func TestEngineShutdown(t *testing.T) {
opts := EngineOpts{
Logger: nil,

View file

@ -20,6 +20,7 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"sync"
@ -543,7 +544,10 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
if err != nil {
return "", err
}
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
return "", errors.Errorf("server returned HTTP status %s", resp.Status)

View file

@ -95,7 +95,10 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
// recoverable.
return recoverableError{err}
}
defer httpResp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, httpResp.Body)
httpResp.Body.Close()
}()
if httpResp.StatusCode/100 != 2 {
scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
@ -148,7 +151,10 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
if err != nil {
return nil, errors.Wrap(err, "error sending request")
}
defer httpResp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, httpResp.Body)
httpResp.Body.Close()
}()
if httpResp.StatusCode/100 != 2 {
return nil, errors.Errorf("server returned HTTP status %s", httpResp.Status)
}

View file

@ -21,9 +21,18 @@ package remote
import (
"sync"
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var interner = newPool()
var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "string_interner_zero_reference_releases_total",
Help: "The number of times release has been called for strings that are not interned.",
})
type pool struct {
mtx sync.RWMutex
@ -73,7 +82,8 @@ func (p *pool) release(s string) {
p.mtx.RUnlock()
if !ok {
panic("released unknown string")
noReferenceReleases.Inc()
return
}
refs := atomic.AddInt64(&interned.refs, -1)
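
For context, the hunk above belongs to the remote-write string interner: strings are reference counted so identical label values can share one allocation, and release now counts, rather than panics on, a release of a string that was never interned. A compact, hypothetical sketch of such a pool; the entry type, the intern method, and the delete-on-zero behaviour are assumptions, only the fields and calls visible in the hunk come from the diff (the Namespace/Subsystem fields on the counter are dropped for brevity):

package remote

import (
	"sync"
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{
	Name: "string_interner_zero_reference_releases_total",
	Help: "The number of times release has been called for strings that are not interned.",
})

type entry struct {
	s    string
	refs int64
}

type pool struct {
	mtx  sync.RWMutex
	pool map[string]*entry
}

func newPool() *pool { return &pool{pool: map[string]*entry{}} }

// intern returns a canonical copy of s, creating or ref-counting an entry.
func (p *pool) intern(s string) string {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if e, ok := p.pool[s]; ok {
		atomic.AddInt64(&e.refs, 1)
		return e.s
	}
	p.pool[s] = &entry{s: s, refs: 1}
	return s
}

// release drops one reference; unknown strings are counted, not fatal.
func (p *pool) release(s string) {
	p.mtx.RLock()
	e, ok := p.pool[s]
	p.mtx.RUnlock()
	if !ok {
		noReferenceReleases.Inc() // the bugfix: count instead of panicking
		return
	}
	if atomic.AddInt64(&e.refs, -1) > 0 {
		return
	}
	p.mtx.Lock()
	if atomic.LoadInt64(&e.refs) == 0 {
		delete(p.pool, s)
	}
	p.mtx.Unlock()
}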

View file

@ -339,6 +339,10 @@ func (t *QueueManager) Stop() {
// StoreSeries keeps track of which series we know about for lookups when sending samples to remote.
func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
// Lock before any calls to labelsToLabels proto, as that's where string interning is done.
t.seriesMtx.Lock()
defer t.seriesMtx.Unlock()
temp := make(map[uint64][]prompb.Label, len(series))
for _, s := range series {
ls := processExternalLabels(s.Labels, t.externalLabels)
@ -350,8 +354,6 @@ func (t *QueueManager) StoreSeries(series []tsdb.RefSeries, index int) {
temp[s.Ref] = labelsToLabelsProto(rl)
}
t.seriesMtx.Lock()
defer t.seriesMtx.Unlock()
for ref, labels := range temp {
t.seriesSegmentIndexes[ref] = index

View file

@ -32,6 +32,7 @@ import (
"github.com/golang/snappy"
"github.com/stretchr/testify/require"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
@ -257,6 +258,39 @@ func TestReshardRaceWithStop(t *testing.T) {
}
}
func TestReleaseNoninternedString(t *testing.T) {
c := NewTestStorageClient()
var m *QueueManager
h := sync.Mutex{}
h.Lock()
m = NewQueueManager(nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
m.Start()
go func() {
for {
m.SeriesReset(1)
}
}()
for i := 1; i < 1000; i++ {
m.StoreSeries([]tsdb.RefSeries{
tsdb.RefSeries{
Ref: uint64(i),
Labels: tsdbLabels.Labels{
tsdbLabels.Label{
Name: "asdf",
Value: fmt.Sprintf("%d", i),
},
},
},
}, 0)
}
metric := client_testutil.ToFloat64(noReferenceReleases)
testutil.Assert(t, metric == 0, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
}
func createTimeseries(n int) ([]tsdb.RefSample, []tsdb.RefSeries) {
samples := make([]tsdb.RefSample, 0, n)
series := make([]tsdb.RefSeries, 0, n)

View file

@ -0,0 +1,187 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil provides helpers to test code using the prometheus package
// of client_golang.
//
// While writing unit tests to verify correct instrumentation of your code, it's
// a common mistake to mostly test the instrumentation library instead of your
// own code. Rather than verifying that a prometheus.Counter's value has changed
// as expected or that it shows up in the exposition after registration, it is
// in general more robust and more faithful to the concept of unit tests to use
// mock implementations of the prometheus.Counter and prometheus.Registerer
// interfaces that simply assert that the Add or Register methods have been
// called with the expected arguments. However, this might be overkill in simple
// scenarios. The ToFloat64 function is provided for simple inspection of a
// single-value metric, but it has to be used with caution.
//
// End-to-end tests to verify all or larger parts of the metrics exposition can
// be implemented with the CollectAndCompare or GatherAndCompare functions. The
// most appropriate use is not so much testing instrumentation of your code, but
// testing custom prometheus.Collector implementations and in particular whole
// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
// and convert it into Prometheus metrics.
package testutil
import (
"bytes"
"fmt"
"io"
"github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/internal"
)
// ToFloat64 collects all Metrics from the provided Collector. It expects that
// this results in exactly one Metric being collected, which must be a Gauge,
// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
// the value of the collected Metric.
//
// The Collector provided is typically a simple instance of Gauge or Counter, or
// less commonly a GaugeVec or CounterVec with exactly one element. But any
// Collector fulfilling the prerequisites described above will do.
//
// Use this function with caution. It is computationally very expensive and thus
// not suited at all to read values from Metrics in regular code. This is really
// only for testing purposes, and even for testing, other approaches are often
// more appropriate (see this package's documentation).
//
// A clear anti-pattern would be to use a metric type from the prometheus
// package to track values that are also needed for something else than the
// exposition of Prometheus metrics. For example, you would like to track the
// number of items in a queue because your code should reject queuing further
// items if a certain limit is reached. It is tempting to track the number of
// items in a prometheus.Gauge, as it is then easily available as a metric for
// exposition, too. However, then you would need to call ToFloat64 in your
// regular code, potentially quite often. The recommended way is to track the
// number of items conventionally (in the way you would have done it without
// considering Prometheus metrics) and then expose the number with a
// prometheus.GaugeFunc.
func ToFloat64(c prometheus.Collector) float64 {
var (
m prometheus.Metric
mCount int
mChan = make(chan prometheus.Metric)
done = make(chan struct{})
)
go func() {
for m = range mChan {
mCount++
}
close(done)
}()
c.Collect(mChan)
close(mChan)
<-done
if mCount != 1 {
panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
}
pb := &dto.Metric{}
m.Write(pb)
if pb.Gauge != nil {
return pb.Gauge.GetValue()
}
if pb.Counter != nil {
return pb.Counter.GetValue()
}
if pb.Untyped != nil {
return pb.Untyped.GetValue()
}
panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
}
// CollectAndCompare registers the provided Collector with a newly created
// pedantic Registry. It then does the same as GatherAndCompare, gathering the
// metrics from the pedantic Registry.
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
reg := prometheus.NewPedanticRegistry()
if err := reg.Register(c); err != nil {
return fmt.Errorf("registering collector failed: %s", err)
}
return GatherAndCompare(reg, expected, metricNames...)
}
// GatherAndCompare gathers all metrics from the provided Gatherer and compares
// it to an expected output read from the provided Reader in the Prometheus text
// exposition format. If any metricNames are provided, only metrics with those
// names are compared.
func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
got, err := g.Gather()
if err != nil {
return fmt.Errorf("gathering metrics failed: %s", err)
}
if metricNames != nil {
got = filterMetrics(got, metricNames)
}
var tp expfmt.TextParser
wantRaw, err := tp.TextToMetricFamilies(expected)
if err != nil {
return fmt.Errorf("parsing expected metrics failed: %s", err)
}
want := internal.NormalizeMetricFamilies(wantRaw)
return compare(got, want)
}
// compare encodes both provided slices of metric families into the text format,
// compares their string message, and returns an error if they do not match.
// The error contains the encoded text of both the desired and the actual
// result.
func compare(got, want []*dto.MetricFamily) error {
var gotBuf, wantBuf bytes.Buffer
enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText)
for _, mf := range got {
if err := enc.Encode(mf); err != nil {
return fmt.Errorf("encoding gathered metrics failed: %s", err)
}
}
enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText)
for _, mf := range want {
if err := enc.Encode(mf); err != nil {
return fmt.Errorf("encoding expected metrics failed: %s", err)
}
}
if wantBuf.String() != gotBuf.String() {
return fmt.Errorf(`
metric output does not match expectation; want:
%s
got:
%s`, wantBuf.String(), gotBuf.String())
}
return nil
}
func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
var filtered []*dto.MetricFamily
for _, m := range metrics {
for _, name := range names {
if m.GetName() == name {
filtered = append(filtered, m)
break
}
}
}
return filtered
}
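
The vendored helpers above are what the new TestReleaseNoninternedString uses via client_testutil.ToFloat64. A hypothetical, self-contained usage sketch of ToFloat64 and CollectAndCompare; the metric name and the expected exposition text are invented for illustration:

package main

import (
	"log"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	requestsTotal := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled.",
	})
	requestsTotal.Inc()

	// Single-value inspection, as in the queue manager test above.
	if got := testutil.ToFloat64(requestsTotal); got != 1 {
		log.Fatalf("expected 1, got %v", got)
	}

	// Compare the full text exposition against an expected rendering.
	expected := `
# HELP myapp_requests_total Total requests handled.
# TYPE myapp_requests_total counter
myapp_requests_total 1
`
	if err := testutil.CollectAndCompare(requestsTotal, strings.NewReader(expected), "myapp_requests_total"); err != nil {
		log.Fatal(err)
	}
}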

View file

@ -1,5 +1,8 @@
## master / unreleased
## 0.7.1
- [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek
## 0.7.0
- [CHANGE] tsdb now requires golang 1.12 or higher.
- [REMOVED] `chunks.NewReader` is removed as it wasn't used anywhere.

View file

@ -60,7 +60,6 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(
endif
endif
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
pkgs = ./...
ifeq (arm, $(GOHOSTARCH))
@ -72,14 +71,29 @@ endif
PROMU_VERSION ?= 0.3.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
STATICCHECK_VERSION ?= 2019.1
STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
STATICCHECK :=
# staticcheck only supports linux, freebsd, darwin and windows platforms on i386/amd64
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
STATICCHECK_VERSION ?= 2019.1
STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
endif
endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@ -147,6 +161,7 @@ common-vet:
.PHONY: common-staticcheck
common-staticcheck: $(STATICCHECK)
ifdef STATICCHECK
@echo ">> running staticcheck"
chmod +x $(STATICCHECK)
ifdef GO111MODULE
@ -157,6 +172,7 @@ ifdef GO111MODULE
else
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
endif
endif
.PHONY: common-unused
common-unused: $(GOVENDOR)
@ -187,17 +203,28 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
.PHONY: common-docker
common-docker:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
.
.PHONY: common-docker-publish
common-docker-publish:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
.PHONY: common-docker-tag-latest
common-docker-tag-latest:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
.PHONY: common-docker-manifest
common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
@ -214,9 +241,11 @@ proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
ifdef STATICCHECK
$(STATICCHECK):
mkdir -p $(FIRST_GOPATH)/bin
curl -s -L $(STATICCHECK_URL) > $(STATICCHECK)
endif
ifdef GOVENDOR
.PHONY: $(GOVENDOR)

View file

@ -483,11 +483,12 @@ func (it *mergedPostings) Seek(id uint64) bool {
// Heapifying when there is lots of Seeks is inefficient,
// mark to be re-heapified on the Next() call.
it.heaped = false
newH := make(postingsHeap, 0, len(it.h))
lowest := ^uint64(0)
n := 0
for _, i := range it.h {
if i.Seek(id) {
newH = append(newH, i)
it.h[n] = i
n++
if i.At() < lowest {
lowest = i.At()
}
@ -498,7 +499,7 @@ func (it *mergedPostings) Seek(id uint64) bool {
}
}
}
it.h = newH
it.h = it.h[:n]
if len(it.h) == 0 {
return false
}
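
The Seek change above fixes the memory allocation regression by compacting it.h in place instead of building a fresh newH slice on every call. The same idiom in standalone form (illustrative, not tsdb code):

package main

import "fmt"

// filterInPlace keeps the elements that satisfy keep, moving them to the
// front of the same backing array; no new slice is allocated.
func filterInPlace(xs []int, keep func(int) bool) []int {
	n := 0
	for _, x := range xs {
		if keep(x) {
			xs[n] = x
			n++
		}
	}
	return xs[:n]
}

func main() {
	xs := []int{1, 2, 3, 4, 5, 6}
	fmt.Println(filterInPlace(xs, func(x int) bool { return x%2 == 0 })) // [2 4 6]
}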

vendor/modules.txt (vendored, 3 lines changed)
View file

@ -231,6 +231,7 @@ github.com/prometheus/client_golang/api/prometheus/v1
github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promauto
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/testutil
# github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.3.0
@ -247,7 +248,7 @@ github.com/prometheus/procfs
github.com/prometheus/procfs/nfs
github.com/prometheus/procfs/xfs
github.com/prometheus/procfs/internal/util
# github.com/prometheus/tsdb v0.7.0
# github.com/prometheus/tsdb v0.7.1
github.com/prometheus/tsdb
github.com/prometheus/tsdb/fileutil
github.com/prometheus/tsdb/labels