Merge branch 'release-2.2'

Signed-off-by: beorn7 <beorn@soundcloud.com>
beorn7 2018-04-10 16:45:56 +02:00
commit 94ff07b81d
16 changed files with 186 additions and 101 deletions

View file

@@ -3,6 +3,14 @@
 * [CHANGE] `marathon_sd`: use `auth_token` and `auth_token_file` for token-based authentication instead of `bearer_token` and `bearer_token_file` respectively.
 * [ENHANCEMENT] `marathon_sd`: adds support for basic and bearer authentication, plus all other common HTTP client options (TLS config, proxy URL, etc.)
 
+## 2.2.1 / 2018-03-13
+
+* [BUGFIX] Fix data loss in TSDB on compaction
+* [BUGFIX] Correctly stop timer in remote-write path
+* [BUGFIX] Fix deadlock triggered by loading targets page
+* [BUGFIX] Fix incorrect buffering of samples on range selection queries
+* [BUGFIX] Handle large index files on windows properly
+
 ## 2.2.0 / 2018-03-08
 
 * [CHANGE] Rename file SD mtime metric.

View file

@@ -1 +1 @@
-2.2.0
+2.2.1

View file

@@ -40,6 +40,7 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
 		scrapeConfigs: make(map[string]*config.ScrapeConfig),
 		scrapePools:   make(map[string]*scrapePool),
 		graceShut:     make(chan struct{}),
+		targetsAll:    make(map[string][]*Target),
 	}
 }
@@ -48,10 +49,16 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
 type Manager struct {
 	logger    log.Logger
 	append    Appendable
+	graceShut chan struct{}
+
+	mtxTargets     sync.Mutex // Guards the fields below.
+	targetsActive  []*Target
+	targetsDropped []*Target
+	targetsAll     map[string][]*Target
+
+	mtxScrape     sync.Mutex // Guards the fields below.
 	scrapeConfigs map[string]*config.ScrapeConfig
 	scrapePools   map[string]*scrapePool
-	mtx           sync.RWMutex
-	graceShut     chan struct{}
 }
 
 // Run starts background processing to handle target updates and reload the scraping loops.
@@ -68,6 +75,9 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error {
 // Stop cancels all running scrape pools and blocks until all have exited.
 func (m *Manager) Stop() {
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
 	for _, sp := range m.scrapePools {
 		sp.stop()
 	}
@@ -76,8 +86,9 @@ func (m *Manager) Stop() {
 
 // ApplyConfig resets the manager's target providers and job configurations as defined by the new cfg.
 func (m *Manager) ApplyConfig(cfg *config.Config) error {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
 	c := make(map[string]*config.ScrapeConfig)
 	for _, scfg := range cfg.ScrapeConfigs {
 		c[scfg.JobName] = scfg
@@ -97,71 +108,66 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error {
 	return nil
 }
 
-// TargetMap returns map of active and dropped targets and their corresponding scrape config job name.
-func (m *Manager) TargetMap() map[string][]*Target {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	targets := make(map[string][]*Target)
-	for jobName, sp := range m.scrapePools {
-		sp.mtx.RLock()
-		for _, t := range sp.targets {
-			targets[jobName] = append(targets[jobName], t)
-		}
-		targets[jobName] = append(targets[jobName], sp.droppedTargets...)
-		sp.mtx.RUnlock()
-	}
-
-	return targets
+// TargetsAll returns active and dropped targets grouped by job_name.
+func (m *Manager) TargetsAll() map[string][]*Target {
+	m.mtxTargets.Lock()
+	defer m.mtxTargets.Unlock()
+	return m.targetsAll
 }
 
-// Targets returns the targets currently being scraped.
-func (m *Manager) Targets() []*Target {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	var targets []*Target
-	for _, p := range m.scrapePools {
-		p.mtx.RLock()
-		for _, tt := range p.targets {
-			targets = append(targets, tt)
-		}
-		p.mtx.RUnlock()
-	}
-
-	return targets
+// TargetsActive returns the active targets currently being scraped.
+func (m *Manager) TargetsActive() []*Target {
+	m.mtxTargets.Lock()
+	defer m.mtxTargets.Unlock()
+	return m.targetsActive
 }
 
-// DroppedTargets returns the targets dropped during relabelling.
-func (m *Manager) DroppedTargets() []*Target {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	var droppedTargets []*Target
-	for _, p := range m.scrapePools {
-		p.mtx.RLock()
-		droppedTargets = append(droppedTargets, p.droppedTargets...)
-		p.mtx.RUnlock()
-	}
-	return droppedTargets
+// TargetsDropped returns the dropped targets during relabelling.
+func (m *Manager) TargetsDropped() []*Target {
+	m.mtxTargets.Lock()
+	defer m.mtxTargets.Unlock()
+	return m.targetsDropped
+}
+
+func (m *Manager) targetsUpdate(active, dropped map[string][]*Target) {
+	m.mtxTargets.Lock()
+	defer m.mtxTargets.Unlock()
+
+	m.targetsAll = make(map[string][]*Target)
+	m.targetsActive = nil
+	m.targetsDropped = nil
+	for jobName, targets := range active {
+		m.targetsAll[jobName] = append(m.targetsAll[jobName], targets...)
+		m.targetsActive = append(m.targetsActive, targets...)
+	}
+	for jobName, targets := range dropped {
+		m.targetsAll[jobName] = append(m.targetsAll[jobName], targets...)
+		m.targetsDropped = append(m.targetsDropped, targets...)
+	}
 }
 
 func (m *Manager) reload(t map[string][]*targetgroup.Group) {
+	m.mtxScrape.Lock()
+	defer m.mtxScrape.Unlock()
+
+	tDropped := make(map[string][]*Target)
+	tActive := make(map[string][]*Target)
+
 	for tsetName, tgroup := range t {
+		var sp *scrapePool
+		if existing, ok := m.scrapePools[tsetName]; !ok {
 			scrapeConfig, ok := m.scrapeConfigs[tsetName]
 			if !ok {
 				level.Error(m.logger).Log("msg", "error reloading target set", "err", fmt.Sprintf("invalid config id:%v", tsetName))
 				continue
 			}
-
-		// Scrape pool doesn't exist so start a new one.
-		existing, ok := m.scrapePools[tsetName]
-		if !ok {
-			sp := newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", tsetName))
+			sp = newScrapePool(scrapeConfig, m.append, log.With(m.logger, "scrape_pool", tsetName))
 			m.scrapePools[tsetName] = sp
-			sp.Sync(tgroup)
-
 		} else {
-			existing.Sync(tgroup)
+			sp = existing
 		}
+		tActive[tsetName], tDropped[tsetName] = sp.Sync(tgroup)
 	}
+	m.targetsUpdate(tActive, tDropped)
 }
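Note: the rewritten accessors stop walking every scrape pool (taking each pool's lock) on every targets-page render and instead serve snapshots cached under the dedicated mtxTargets. A minimal sketch of that pattern with stand-in types (not the real Prometheus ones):

package main

import (
	"fmt"
	"sync"
)

type target struct{ url string }

type manager struct {
	mtxTargets    sync.Mutex // guards the cached snapshot below
	targetsActive []*target
}

// targetsUpdate swaps in a fresh snapshot; called after every pool sync.
func (m *manager) targetsUpdate(active []*target) {
	m.mtxTargets.Lock()
	defer m.mtxTargets.Unlock()
	m.targetsActive = active
}

// TargetsActive serves readers from the snapshot alone, never touching
// per-pool locks, so rendering the targets page cannot contend with a reload.
func (m *manager) TargetsActive() []*target {
	m.mtxTargets.Lock()
	defer m.mtxTargets.Unlock()
	return m.targetsActive
}

func main() {
	m := &manager{}
	m.targetsUpdate([]*target{{"http://a:9090"}, {"http://b:9090"}})
	fmt.Println(len(m.TargetsActive())) // 2
}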

View file

@@ -245,8 +245,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 }
 
 // Sync converts target groups into actual scrape targets and synchronizes
-// the currently running scraper with the resulting set.
-func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
+// the currently running scraper with the resulting set and returns all scraped and dropped targets.
+func (sp *scrapePool) Sync(tgs []*targetgroup.Group) (tActive []*Target, tDropped []*Target) {
 	start := time.Now()
 
 	var all []*Target
@@ -273,6 +273,15 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 		time.Since(start).Seconds(),
 	)
 	targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
+
+	sp.mtx.RLock()
+	for _, t := range sp.targets {
+		tActive = append(tActive, t)
+	}
+	tDropped = sp.droppedTargets
+	sp.mtx.RUnlock()
+
+	return tActive, tDropped
 }
 
 // sync takes a list of potentially duplicated targets, deduplicates them, starts
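Worth noting: Sync now returns its results while briefly holding sp.mtx for reading; the active targets are copied out element by element, while tDropped aliases the pool's droppedTargets slice. A self-contained sketch of the copy-under-read-lock idiom (stand-in types, not the real ones):

package main

import (
	"fmt"
	"sync"
)

type target struct{ url string }

type pool struct {
	mtx     sync.RWMutex
	targets map[uint64]*target
}

// snapshot copies the map's values under the read lock, so the returned
// slice stays valid after the lock is released.
func (p *pool) snapshot() []*target {
	p.mtx.RLock()
	defer p.mtx.RUnlock()
	out := make([]*target, 0, len(p.targets))
	for _, t := range p.targets {
		out = append(out, t)
	}
	return out
}

func main() {
	p := &pool{targets: map[uint64]*target{1: {"http://a:9090"}}}
	fmt.Println(len(p.snapshot())) // 1
}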

View file

@@ -110,6 +110,8 @@ func (t *Target) Labels() labels.Labels {
 
 // DiscoveredLabels returns a copy of the target's labels before any processing.
 func (t *Target) DiscoveredLabels() labels.Labels {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
 	lset := make(labels.Labels, len(t.discoveredLabels))
 	copy(lset, t.discoveredLabels)
 	return lset
@@ -117,6 +119,8 @@ func (t *Target) DiscoveredLabels() labels.Labels {
 
 // SetDiscoveredLabels sets new DiscoveredLabels
 func (t *Target) SetDiscoveredLabels(l labels.Labels) {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
 	t.discoveredLabels = l
 }
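The copy in DiscoveredLabels only helps if the read of the slice header itself is synchronized; these two locks make the getter/setter pair race-free under the race detector. A stand-in sketch of the guarded pair:

package main

import (
	"fmt"
	"sync"
)

type label struct{ name, value string }

type target struct {
	mtx              sync.Mutex
	discoveredLabels []label
}

// DiscoveredLabels copies under the lock: without it, reading the slice
// header races with the swap in SetDiscoveredLabels.
func (t *target) DiscoveredLabels() []label {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	out := make([]label, len(t.discoveredLabels))
	copy(out, t.discoveredLabels)
	return out
}

func (t *target) SetDiscoveredLabels(l []label) {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	t.discoveredLabels = l
}

func main() {
	tg := &target{}
	tg.SetDiscoveredLabels([]label{{"job", "node"}})
	fmt.Println(tg.DiscoveredLabels()) // [{job node}]
}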

View file

@@ -151,16 +151,11 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	// We do not include the most recently created block. This gives users a window
-	// of a full block size to piece-wise backup new data without having to care
-	// about data overlap.
 
 	if len(dirs) < 1 {
 		return nil, nil
 	}
-	dirs = dirs[:len(dirs)-1]
 
 	var dms []dirMeta
 	for _, dir := range dirs {
 		meta, err := readMetaFile(dir)
 		if err != nil {
@@ -176,6 +171,10 @@ func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
 		return dms[i].meta.MinTime < dms[j].meta.MinTime
 	})
 
+	// We do not include a recently created block with max(minTime), so the block which was just created from WAL.
+	// This gives users a window of a full block size to piece-wise backup new data without having to care about data overlap.
+	dms = dms[:len(dms)-1]
+
 	var res []string
 	for _, dm := range c.selectDirs(dms) {
 		res = append(res, dm.dir)
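The backup-window exclusion moved from Plan, where it trimmed dirs in directory-listing order, into plan after the blocks are sorted by meta.MinTime; trimming before sorting could drop an arbitrary block instead of the freshest one, which matches the compaction data-loss bugfix in 2.2.1. A runnable sketch with made-up block times (hypothetical values, not tsdb types):

package main

import (
	"fmt"
	"sort"
)

type dirMeta struct {
	dir     string
	minTime int64
}

func main() {
	// Directory listing order need not match block time order.
	dms := []dirMeta{{"b2", 200}, {"b0", 0}, {"b1", 100}}
	sort.Slice(dms, func(i, j int) bool { return dms[i].minTime < dms[j].minTime })
	dms = dms[:len(dms)-1] // exclude the block just cut from the WAL
	fmt.Println(dms)       // [{b0 0} {b1 100}]
}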

View file

@@ -0,0 +1,5 @@
+// +build windows
+
+package fileutil
+
+const maxMapSize = 0x7FFFFFFF // 2GB

View file

@@ -0,0 +1,5 @@
+// +build windows
+
+package fileutil
+
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
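Why these two values (an observation, not stated in the commit): 0x7FFFFFFF is 2^31-1, the largest slice bound a 32-bit build can address, while 0xFFFFFFFFFFFF is 2^48-1, matching the 256TB the comment cites. The two new files declare the same windows-only constant with different values, so they are presumably split per GOARCH as well (the listing omits file names). A quick check of the arithmetic:

package main

import "fmt"

func main() {
	fmt.Println(0x7FFFFFFF == 1<<31-1)            // true: max int32, ~2GB
	fmt.Println(0xFFFFFFFFFFFF == 1<<48-1)        // true
	fmt.Printf("%d TB\n", (0xFFFFFFFFFFFF+1)>>40) // 256 TB
}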

View file

@@ -19,14 +19,14 @@ import (
 	"unsafe"
 )
 
-func mmap(f *os.File, sz int) ([]byte, error) {
-	low, high := uint32(sz), uint32(sz>>32)
+func mmap(f *os.File, size int) ([]byte, error) {
+	low, high := uint32(size), uint32(size>>32)
 	h, errno := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, high, low, nil)
 	if h == 0 {
 		return nil, os.NewSyscallError("CreateFileMapping", errno)
 	}
 
-	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(size))
 	if addr == 0 {
 		return nil, os.NewSyscallError("MapViewOfFile", errno)
 	}
@@ -35,7 +35,7 @@ func mmap(f *os.File, sz int) ([]byte, error) {
 		return nil, os.NewSyscallError("CloseHandle", err)
 	}
 
-	return (*[1 << 30]byte)(unsafe.Pointer(addr))[:sz], nil
+	return (*[maxMapSize]byte)(unsafe.Pointer(addr))[:size], nil
 }
 
 func munmap(b []byte) error {
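The last hunk is the actual fix for large index files: the mapped view was previously re-bounded through a *[1 << 30]byte array type, so slicing any mapping larger than 1GiB would panic with a bounds error, which is presumably why large index files failed on Windows. The array type in this idiom is never allocated; it only sets the length/capacity limit of the resulting slice, so raising it to the per-arch maxMapSize lifts the cap without mapping more memory. A sketch of the idiom on ordinary memory (no Windows syscalls, hypothetical buffer):

package main

import (
	"fmt"
	"unsafe"
)

const maxMapSize = 0x7FFFFFFF // bound for the array *type* only; never allocated

func main() {
	buf := []byte{1, 2, 3, 4}
	p := unsafe.Pointer(&buf[0])

	// Reinterpret p as a pointer to a huge array, then slice just the
	// bytes that are really there; the array size caps len and cap.
	view := (*[maxMapSize]byte)(p)[:len(buf):len(buf)]
	fmt.Println(view) // [1 2 3 4]
}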

View file

@@ -653,6 +653,11 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 	return r, nil
 }
 
+// Version returns the file format version of the underlying index.
+func (r *Reader) Version() int {
+	return r.version
+}
+
 // Range marks a byte range.
 type Range struct {
 	Start, End int64

View file

@@ -61,6 +61,9 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
 		if err := repl.Close(); err != nil {
 			return err
 		}
+		if err := broken.Close(); err != nil {
+			return err
+		}
 		if err := renameFile(repl.Name(), broken.Name()); err != nil {
 			return err
 		}
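Closing broken before renameFile matters specifically on Windows: replacing a file that still has an open handle fails there with a sharing violation, while POSIX rename happily replaces an open file. A small self-contained illustration (hypothetical file names):

package main

import (
	"fmt"
	"os"
)

func main() {
	_ = os.WriteFile("index", []byte("broken"), 0o644)
	_ = os.WriteFile("index.repaired", []byte("fixed"), 0o644)

	broken, _ := os.Open("index")
	// While this handle is open, the rename below fails on Windows with a
	// sharing violation (it succeeds on Linux/macOS).
	_ = broken.Close() // close every handle first...

	fmt.Println(os.Rename("index.repaired", "index")) // ...then replace: <nil>
}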

vendor/vendor.json vendored
View file

@@ -826,40 +826,40 @@
 			"revisionTime": "2016-04-11T19:08:41Z"
 		},
 		{
-			"checksumSHA1": "zVgXlbZ1J8GhBN7tZji7M/SuiAU=",
+			"checksumSHA1": "vNslgGjRBqauFmVIBTkvEWwvURg=",
 			"path": "github.com/prometheus/tsdb",
-			"revision": "16b2bf1b45ce3e3536c78ebec5116ea09a69786e",
-			"revisionTime": "2018-03-02T11:51:49Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
 			"checksumSHA1": "S7F4yWxVLhxQNHMdgoOo6plmOOs=",
 			"path": "github.com/prometheus/tsdb/chunkenc",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
 			"checksumSHA1": "+zsn1i8cqwgZXL8Bg6jDy32xjAo=",
 			"path": "github.com/prometheus/tsdb/chunks",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
-			"checksumSHA1": "h49AAcJ5+iRBwCgbfQf+2T1E1ZE=",
+			"checksumSHA1": "T7qvg4VhFLklT3g+qPkUWxBo0yw=",
 			"path": "github.com/prometheus/tsdb/fileutil",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
-			"checksumSHA1": "UlvN+ZhTu52S8f9niySQpPC+dvQ=",
+			"checksumSHA1": "4ebzIE2Jvj6+SG6yGFSXN8scgfo=",
 			"path": "github.com/prometheus/tsdb/index",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
 			"checksumSHA1": "Va8HWvOFTwFeewZFadMAOzNGDps=",
 			"path": "github.com/prometheus/tsdb/labels",
-			"revision": "494acd307058387ced7646f9996b0f7372eaa558",
-			"revisionTime": "2018-02-15T11:29:47Z"
+			"revision": "659ed644294eec6310cef0685b002a3aed8c8f85",
+			"revisionTime": "2018-03-14T13:49:50Z"
 		},
 		{
 			"checksumSHA1": "5SYLEhADhdBVZAGPVHWggQl7H8k=",
View file

@@ -82,8 +82,8 @@ func (e *apiError) Error() string {
 }
 
 type targetRetriever interface {
-	Targets() []*scrape.Target
-	DroppedTargets() []*scrape.Target
+	TargetsActive() []*scrape.Target
+	TargetsDropped() []*scrape.Target
 }
 
 type alertmanagerRetriever interface {
@@ -452,11 +452,12 @@ type TargetDiscovery struct {
 }
 
 func (api *API) targets(r *http.Request) (interface{}, *apiError) {
-	targets := api.targetRetriever.Targets()
-	droppedTargets := api.targetRetriever.DroppedTargets()
-	res := &TargetDiscovery{ActiveTargets: make([]*Target, len(targets)), DroppedTargets: make([]*DroppedTarget, len(droppedTargets))}
-
-	for i, t := range targets {
+	tActive := api.targetRetriever.TargetsActive()
+	tDropped := api.targetRetriever.TargetsDropped()
+	res := &TargetDiscovery{ActiveTargets: make([]*Target, len(tActive)), DroppedTargets: make([]*DroppedTarget, len(tDropped))}
+
+	for i, t := range tActive {
 		lastErrStr := ""
 		lastErr := t.LastError()
 		if lastErr != nil {
@@ -473,12 +474,11 @@ func (api *API) targets(r *http.Request) (interface{}, *apiError) {
 		}
 	}
 
-	for i, t := range droppedTargets {
+	for i, t := range tDropped {
 		res.DroppedTargets[i] = &DroppedTarget{
 			DiscoveredLabels: t.DiscoveredLabels().Map(),
 		}
 	}
-
 	return res, nil
 }

View file

@@ -45,7 +45,7 @@ import (
 
 type testTargetRetriever struct{}
 
-func (t testTargetRetriever) Targets() []*scrape.Target {
+func (t testTargetRetriever) TargetsActive() []*scrape.Target {
 	return []*scrape.Target{
 		scrape.NewTarget(
 			labels.FromMap(map[string]string{
@@ -58,7 +58,7 @@ func (t testTargetRetriever) Targets() []*scrape.Target {
 		),
 	}
 }
-func (t testTargetRetriever) DroppedTargets() []*scrape.Target {
+func (t testTargetRetriever) TargetsDropped() []*scrape.Target {
 	return []*scrape.Target{
 		scrape.NewTarget(
 			nil,

View file

@@ -437,7 +437,7 @@ func (h *Handler) Run(ctx context.Context) error {
 		h.options.QueryEngine,
 		h.options.Storage.Querier,
 		func() []*scrape.Target {
-			return h.options.ScrapeManager.Targets()
+			return h.options.ScrapeManager.TargetsActive()
 		},
 		func() []*url.URL {
 			return h.options.Notifier.Alertmanagers()
@@ -659,7 +659,7 @@ func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
 
 func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
 	var index []string
-	targets := h.scrapeManager.TargetMap()
+	targets := h.scrapeManager.TargetsAll()
 	for job := range targets {
 		index = append(index, job)
 	}
@@ -677,7 +677,7 @@ func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
 func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
 	// Bucket targets by job label
 	tps := map[string][]*scrape.Target{}
-	for _, t := range h.scrapeManager.Targets() {
+	for _, t := range h.scrapeManager.TargetsActive() {
 		job := t.Labels().Get(model.JobLabel)
 		tps[job] = append(tps[job], t)
 	}
@@ -733,7 +733,13 @@ func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
 			return time.Since(t) / time.Millisecond * time.Millisecond
 		},
 		"consolesPath": func() string { return consolesPath },
-		"pathPrefix":   func() string { return opts.ExternalURL.Path },
+		"pathPrefix": func() string {
+			if opts.RoutePrefix == "/" {
+				return ""
+			} else {
+				return opts.RoutePrefix
+			}
+		},
 		"buildVersion": func() string { return opts.Version.Revision },
 		"stripLabels": func(lset map[string]string, labels ...string) map[string]string {
 			for _, ln := range labels {
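The special case for "/" exists because templates concatenate the prefix with absolute paths: "/" + "/graph" yields "//graph", a scheme-relative URL that the browser resolves against the wrong host. A tiny illustration of the problem the change avoids (hypothetical link construction; the real templates use {{ pathPrefix }}):

package main

import "fmt"

func pathPrefix(routePrefix string) string {
	if routePrefix == "/" {
		return ""
	}
	return routePrefix
}

func main() {
	fmt.Println("/" + "/graph")                 // "//graph" — scheme-relative, wrong resolution
	fmt.Println(pathPrefix("/") + "/graph")     // "/graph"
	fmt.Println(pathPrefix("/prom") + "/graph") // "/prom/graph"
}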

View file

@@ -272,6 +272,41 @@ func TestRoutePrefix(t *testing.T) {
 	testutil.Equals(t, http.StatusOK, resp.StatusCode)
 }
 
+func TestPathPrefix(t *testing.T) {
+	tests := []struct {
+		routePrefix string
+		pathPrefix  string
+	}{
+		{
+			routePrefix: "/",
+			// If we have pathPrefix as "/", URL in UI gets "//" as prefix,
+			// hence doesn't remain relative path anymore.
+			pathPrefix: "",
+		},
+		{
+			routePrefix: "/prometheus",
+			pathPrefix:  "/prometheus",
+		},
+		{
+			routePrefix: "/p1/p2/p3/p4",
+			pathPrefix:  "/p1/p2/p3/p4",
+		},
+	}
+
+	for _, test := range tests {
+		opts := &Options{
+			RoutePrefix: test.routePrefix,
+		}
+
+		pathPrefix := tmplFuncs("", opts)["pathPrefix"].(func() string)
+		pp := pathPrefix()
+
+		testutil.Equals(t, test.pathPrefix, pp)
+	}
+}
+
 func TestDebugHandler(t *testing.T) {
 	for _, tc := range []struct {
 		prefix, url string