// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package retrieval

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/textparse"
	"github.com/prometheus/prometheus/pkg/timestamp"
	"github.com/prometheus/prometheus/storage"
)

const (
	scrapeHealthMetricName   = "up"
	scrapeDurationMetricName = "scrape_duration_seconds"
	scrapeSamplesMetricName  = "scrape_samples_scraped"
)
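
// These metric names are reported as synthetic series alongside every scrape
// via scrapeLoop.report below. A sketch of the resulting samples, with
// hypothetical label values:
//
//	up{job="node", instance="localhost:9100"}                      1
//	scrape_duration_seconds{job="node", instance="localhost:9100"} 0.042
//	scrape_samples_scraped{job="node", instance="localhost:9100"}  512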

var (
	targetIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "prometheus_target_interval_length_seconds",
			Help:       "Actual intervals between scrapes.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		},
		[]string{"interval"},
	)
	targetSkippedScrapes = prometheus.NewCounter(
		prometheus.CounterOpts{
			Name: "prometheus_target_skipped_scrapes_total",
			Help: "Total number of scrapes that were skipped because the metric storage was throttled.",
		},
	)
	targetReloadIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "prometheus_target_reload_length_seconds",
			Help:       "Actual interval to reload the scrape pool with a given configuration.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		},
		[]string{"interval"},
	)
	targetSyncIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "prometheus_target_sync_length_seconds",
			Help:       "Actual interval to sync the scrape pool.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		},
		[]string{"scrape_job"},
	)
	targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "prometheus_target_scrape_pool_sync_total",
			Help: "Total number of syncs that were executed on a scrape pool.",
		},
		[]string{"scrape_job"},
	)
)

func init() {
	prometheus.MustRegister(targetIntervalLength)
	prometheus.MustRegister(targetSkippedScrapes)
	prometheus.MustRegister(targetReloadIntervalLength)
	prometheus.MustRegister(targetSyncIntervalLength)
	prometheus.MustRegister(targetScrapePoolSyncsCounter)
}

// scrapePool manages scrapes for sets of targets.
type scrapePool struct {
	appendable Appendable
	ctx        context.Context

	mtx    sync.RWMutex
	config *config.ScrapeConfig
	client *http.Client
	// Targets and loops must always be synchronized to have the same
	// set of hashes.
	targets map[uint64]*Target
	loops   map[uint64]loop

	// Constructor for new scrape loops. This is settable for testing convenience.
	newLoop func(context.Context, scraper, func() storage.Appender, func() storage.Appender) loop
}

func newScrapePool(ctx context.Context, cfg *config.ScrapeConfig, app Appendable) *scrapePool {
	client, err := NewHTTPClient(cfg.HTTPClientConfig)
	if err != nil {
		// Any errors that could occur here should be caught during config validation.
		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
	}
	return &scrapePool{
		appendable: app,
		config:     cfg,
		ctx:        ctx,
		client:     client,
		targets:    map[uint64]*Target{},
		loops:      map[uint64]loop{},
		newLoop:    newScrapeLoop,
	}
}
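
// A minimal usage sketch of the pool lifecycle (hypothetical caller; cfg,
// app, and tgs are assumed to come from configuration loading and service
// discovery):
//
//	sp := newScrapePool(context.Background(), cfg, app)
//	sp.Sync(tgs)      // start scrape loops for the discovered targets
//	sp.reload(newCfg) // restart all loops with a new configuration
//	sp.stop()         // terminate all loops and wait for them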

// stop terminates all scrape loops and returns after they all terminated.
func (sp *scrapePool) stop() {
	var wg sync.WaitGroup

	sp.mtx.Lock()
	defer sp.mtx.Unlock()

	for fp, l := range sp.loops {
		wg.Add(1)

		go func(l loop) {
			l.stop()
			wg.Done()
		}(l)

		delete(sp.loops, fp)
		delete(sp.targets, fp)
	}

	wg.Wait()
}

// reload the scrape pool with the given scrape configuration. The target state is preserved
// but all scrape loops are restarted with the new scrape configuration.
// This method returns after all scrape loops that were stopped have fully terminated.
func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
	start := time.Now()

	sp.mtx.Lock()
	defer sp.mtx.Unlock()

	client, err := NewHTTPClient(cfg.HTTPClientConfig)
	if err != nil {
		// Any errors that could occur here should be caught during config validation.
		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
	}
	sp.config = cfg
	sp.client = client

	var (
		wg       sync.WaitGroup
		interval = time.Duration(sp.config.ScrapeInterval)
		timeout  = time.Duration(sp.config.ScrapeTimeout)
	)

	for fp, oldLoop := range sp.loops {
		var (
			t       = sp.targets[fp]
			s       = &targetScraper{Target: t, client: sp.client}
			newLoop = sp.newLoop(sp.ctx, s,
				func() storage.Appender {
					return sp.sampleAppender(t)
				},
				func() storage.Appender {
					return sp.reportAppender(t)
				},
			)
		)
		wg.Add(1)

		go func(oldLoop, newLoop loop) {
			oldLoop.stop()
			wg.Done()

			go newLoop.run(interval, timeout, nil)
		}(oldLoop, newLoop)

		sp.loops[fp] = newLoop
	}

	wg.Wait()
	targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
		time.Since(start).Seconds(),
	)
}

// Sync converts target groups into actual scrape targets and synchronizes
// the currently running scraper with the resulting set.
func (sp *scrapePool) Sync(tgs []*config.TargetGroup) {
	start := time.Now()

	var all []*Target
	for _, tg := range tgs {
		targets, err := targetsFromGroup(tg, sp.config)
		if err != nil {
			log.With("err", err).Error("creating targets failed")
			continue
		}
		all = append(all, targets...)
	}
	sp.sync(all)

	targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
		time.Since(start).Seconds(),
	)
	targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
}
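
// A sketch of the input Sync expects (assuming the config.TargetGroup shape
// with Targets, Labels, and Source fields from the common/model label types;
// the concrete values are hypothetical):
//
//	sp.Sync([]*config.TargetGroup{{
//		Targets: []model.LabelSet{{model.AddressLabel: "localhost:9100"}},
//		Labels:  model.LabelSet{"env": "prod"},
//		Source:  "static/0",
//	}})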

// sync takes a list of potentially duplicated targets, deduplicates them, starts
// scrape loops for new targets, and stops scrape loops for disappeared targets.
// It returns after all stopped scrape loops terminated.
func (sp *scrapePool) sync(targets []*Target) {
	sp.mtx.Lock()
	defer sp.mtx.Unlock()

	var (
		uniqueTargets = map[uint64]struct{}{}
		interval      = time.Duration(sp.config.ScrapeInterval)
		timeout       = time.Duration(sp.config.ScrapeTimeout)
	)

	for _, t := range targets {
		// Copy the loop variable so the appender closures below capture
		// this iteration's target rather than the shared variable.
		t := t
		hash := t.hash()
		uniqueTargets[hash] = struct{}{}

		if _, ok := sp.targets[hash]; !ok {
			s := &targetScraper{Target: t, client: sp.client}
			l := sp.newLoop(sp.ctx, s,
				func() storage.Appender {
					return sp.sampleAppender(t)
				},
				func() storage.Appender {
					return sp.reportAppender(t)
				},
			)

			sp.targets[hash] = t
			sp.loops[hash] = l

			go l.run(interval, timeout, nil)
		}
	}

	var wg sync.WaitGroup

	// Stop and remove old targets and scraper loops.
	for hash := range sp.targets {
		if _, ok := uniqueTargets[hash]; !ok {
			wg.Add(1)
			go func(l loop) {
				l.stop()
				wg.Done()
			}(sp.loops[hash])

			delete(sp.loops, hash)
			delete(sp.targets, hash)
		}
	}

	// Wait for all potentially stopped scrapers to terminate.
	// This covers the case of flapping targets. If the server is under high load, a new scraper
	// may be active and tries to insert. The old scraper that didn't terminate yet could still
	// be inserting a previous sample set.
	wg.Wait()
}

// sampleAppender returns an appender for ingested samples from the target.
func (sp *scrapePool) sampleAppender(target *Target) storage.Appender {
	app, err := sp.appendable.Appender()
	if err != nil {
		panic(err)
	}
	// The relabelAppender has to be inside the label-modifying appenders
	// so the relabeling rules are applied to the correct label set.
	if mrc := sp.config.MetricRelabelConfigs; len(mrc) > 0 {
		app = relabelAppender{
			Appender:    app,
			relabelings: mrc,
		}
	}

	if sp.config.HonorLabels {
		app = honorLabelsAppender{
			Appender: app,
			labels:   target.Labels(),
		}
	} else {
		app = ruleLabelsAppender{
			Appender: app,
			labels:   target.Labels(),
		}
	}
	return app
}
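
// The resulting decorator chain for a config with metric relabeling and
// honor_labels disabled looks like this (illustrative only):
//
//	ruleLabelsAppender{
//		Appender: relabelAppender{
//			Appender: storageAppender, // from sp.appendable.Appender()
//		},
//	}
//
// Samples pass through the outermost appender first, so target labels are
// attached before the metric relabeling rules run on the combined label set.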

// reportAppender returns an appender for reporting samples for the target.
func (sp *scrapePool) reportAppender(target *Target) storage.Appender {
	app, err := sp.appendable.Appender()
	if err != nil {
		panic(err)
	}
	return ruleLabelsAppender{
		Appender: app,
		labels:   target.Labels(),
	}
}

// A scraper retrieves samples and accepts a status report at the end.
type scraper interface {
	scrape(ctx context.Context, w io.Writer) error
	report(start time.Time, dur time.Duration, err error)
	offset(interval time.Duration) time.Duration
}
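
// A minimal in-memory implementation sketch, e.g. for tests (hypothetical;
// not part of this package):
//
//	type staticScraper struct{ body string }
//
//	func (s staticScraper) scrape(ctx context.Context, w io.Writer) error {
//		_, err := io.WriteString(w, s.body)
//		return err
//	}
//	func (s staticScraper) report(start time.Time, dur time.Duration, err error) {}
//	func (s staticScraper) offset(interval time.Duration) time.Duration          { return 0 }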

// targetScraper implements the scraper interface for a target.
type targetScraper struct {
	*Target
	client *http.Client
}

const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`

var scrapeBufPool = sync.Pool{}

func getScrapeBuf() []byte {
	b := scrapeBufPool.Get()
	if b == nil {
		return make([]byte, 0, 8192)
	}
	return b.([]byte)
}

func putScrapeBuf(b []byte) {
	b = b[:0]
	scrapeBufPool.Put(b)
}
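
// Usage pattern for the buffer pool (as in scrapeLoop.run below): take a
// byte slice, wrap it in a bytes.Buffer for the scrape, and return the
// underlying slice afterwards so later scrapes can reuse its capacity:
//
//	buf := bytes.NewBuffer(getScrapeBuf())
//	// ... write the scrape response into buf ...
//	putScrapeBuf(buf.Bytes())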

func (s *targetScraper) scrape(ctx context.Context, w io.Writer) error {
	req, err := http.NewRequest("GET", s.URL().String(), nil)
	if err != nil {
		return err
	}
	// Disable accept header to always negotiate for text format.
	// req.Header.Add("Accept", acceptHeader)

	resp, err := ctxhttp.Do(ctx, s.client, req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	_, err = io.Copy(w, resp.Body)
	return err
}
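
// A sketch of calling scrape directly with a deadline (hypothetical caller;
// ts is an initialized *targetScraper):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//
//	var buf bytes.Buffer
//	if err := ts.scrape(ctx, &buf); err != nil {
//		log.Errorf("scrape failed: %s", err)
//	}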

// A loop can run and be stopped again. It must not be reused after it was stopped.
type loop interface {
	run(interval, timeout time.Duration, errc chan<- error)
	stop()
}

type scrapeLoop struct {
	scraper scraper

	appender       func() storage.Appender
	reportAppender func() storage.Appender

	// cache maps raw metric strings from the scraped text format to the
	// storage references of their series, so already-seen series skip the
	// label parsing and series lookup on subsequent scrapes.
	cache map[string]uint64

	done   chan struct{}
	ctx    context.Context
	cancel func()
}

func newScrapeLoop(ctx context.Context, sc scraper, app, reportApp func() storage.Appender) loop {
	sl := &scrapeLoop{
		scraper:        sc,
		appender:       app,
		reportAppender: reportApp,
		cache:          map[string]uint64{},
		done:           make(chan struct{}),
	}
	sl.ctx, sl.cancel = context.WithCancel(ctx)

	return sl
}
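
// Driving a loop manually (as scrapePool.sync does for each new target;
// interval and timeout come from the scrape configuration):
//
//	l := sp.newLoop(sp.ctx, s, appenderFn, reportAppenderFn)
//	go l.run(interval, timeout, nil) // errc may be nil if scrape errors are ignored
//	// ...
//	l.stop() // blocks until the loop has terminated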

func (sl *scrapeLoop) run(interval, timeout time.Duration, errc chan<- error) {
	defer close(sl.done)

	select {
	case <-time.After(sl.scraper.offset(interval)):
		// Continue after a scraping offset.
	case <-sl.ctx.Done():
		return
	}

	var last time.Time

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-sl.ctx.Done():
			return
		default:
		}

		var (
			start             = time.Now()
			scrapeCtx, cancel = context.WithTimeout(sl.ctx, timeout)
		)

		// Only record after the first scrape.
		if !last.IsZero() {
			targetIntervalLength.WithLabelValues(interval.String()).Observe(
				time.Since(last).Seconds(),
			)
		}

		n := 0
		buf := bytes.NewBuffer(getScrapeBuf())

		err := sl.scraper.scrape(scrapeCtx, buf)
		// Release the timeout context's resources as soon as the scrape
		// returns rather than discarding the cancel function.
		cancel()
		if err == nil {
			b := buf.Bytes()

			if n, err = sl.append(b, start); err != nil {
				log.With("err", err).Error("append failed")
			}
			putScrapeBuf(b)
		} else if errc != nil {
			errc <- err
		}

		sl.report(start, time.Since(start), n, err)
		last = start

		select {
		case <-sl.ctx.Done():
			return
		case <-ticker.C:
		}
	}
}

func (sl *scrapeLoop) stop() {
	sl.cancel()
	<-sl.done
}

type sample struct {
	metric labels.Labels
	t      int64
	v      float64
}

type samples []sample

func (s samples) Len() int      { return len(s) }
func (s samples) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s samples) Less(i, j int) bool {
	d := labels.Compare(s[i].metric, s[j].metric)
	if d < 0 {
		return true
	} else if d > 0 {
		return false
	}
	return s[i].t < s[j].t
}
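
// samples implements sort.Interface, ordering by label set and then by
// timestamp, so a batch can be sorted with the standard library:
//
//	sort.Sort(smpls) // smpls is a samples value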

func (sl *scrapeLoop) append(b []byte, ts time.Time) (n int, err error) {
	var (
		app     = sl.appender()
		p       = textparse.New(b)
		defTime = timestamp.FromTime(ts)
	)

	for p.Next() {
		t := defTime
		met, tp, v := p.At()
		if tp != nil {
			t = *tp
		}

		mets := string(met)
		ref, ok := sl.cache[mets]
		if ok {
			// Fast path: add by cached series reference. A stale
			// reference falls through to the slow path below.
			if err = app.Add(ref, t, v); err == nil {
				continue
			} else if err != storage.ErrNotFound {
				break
			}
			ok = false
		}
		if !ok {
			// Slow path: parse the label set and create or look up
			// the series before adding the sample.
			var lset labels.Labels
			p.Metric(&lset)
			ref, err = app.SetSeries(lset)
			if err != nil {
				break
			}
			if err = app.Add(ref, t, v); err != nil {
				break
			}
		}
		sl.cache[mets] = ref
		n++
	}
	if err == nil {
		err = p.Err()
	}
	if err != nil {
		app.Rollback()
		return 0, err
	}
	if err := app.Commit(); err != nil {
		return 0, err
	}
	return n, nil
}
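
// A sketch of the input append consumes, in the Prometheus text exposition
// format (the trailing timestamp is optional; samples without one get
// defTime; the series and values are hypothetical):
//
//	http_requests_total{code="200"} 1027 1395066363000
//	http_requests_total{code="400"} 3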

func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scrapedSamples int, err error) error {
	sl.scraper.report(start, duration, err)

	ts := timestamp.FromTime(start)

	var health float64
	if err == nil {
		health = 1
	}

	app := sl.reportAppender()

	if err := sl.addReportSample(app, scrapeHealthMetricName, ts, health); err != nil {
		app.Rollback()
		return err
	}
	if err := sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds()); err != nil {
		app.Rollback()
		return err
	}
	if err := sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scrapedSamples)); err != nil {
		app.Rollback()
		return err
	}
	return app.Commit()
}

func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error {
	ref, ok := sl.cache[s]

	if ok {
		// A nil error or any error other than ErrNotFound is final; only
		// a stale reference falls through to re-create the series.
		if err := app.Add(ref, t, v); err != storage.ErrNotFound {
			return err
		}
	}
	met := labels.Labels{
		labels.Label{Name: labels.MetricName, Value: s},
	}
	ref, err := app.SetSeries(met)
	if err != nil {
		return err
	}
	if err = app.Add(ref, t, v); err != nil {
		return err
	}
	sl.cache[s] = ref

	return nil
}