Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)
Merge pull request #1441 from prometheus/scraperef9

Next iteration of retrieval refactoring

This commit is contained in:
commit 2c931950c6
retrieval/scrape.go

@@ -15,13 +15,18 @@ package retrieval

 import (
 	"errors"
+	"fmt"
+	"io"
+	"net/http"
 	"sync"
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/expfmt"
 	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
 	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/storage"
@@ -70,26 +75,33 @@ func init() {

 // scrapePool manages scrapes for sets of targets.
 type scrapePool struct {
 	appender storage.SampleAppender
-	config   *config.ScrapeConfig

 	ctx context.Context

+	mtx    sync.RWMutex
+	config *config.ScrapeConfig
+	client *http.Client
 	// Targets and loops must always be synchronized to have the same
-	// set of fingerprints.
-	mtx     sync.RWMutex
-	targets map[model.Fingerprint]*Target
-	loops   map[model.Fingerprint]loop
+	// set of hashes.
+	targets map[uint64]*Target
+	loops   map[uint64]loop

 	// Constructor for new scrape loops. This is settable for testing convenience.
 	newLoop func(context.Context, scraper, storage.SampleAppender, storage.SampleAppender) loop
 }

 func newScrapePool(cfg *config.ScrapeConfig, app storage.SampleAppender) *scrapePool {
+	client, err := newHTTPClient(cfg)
+	if err != nil {
+		// Any errors that could occur here should be caught during config validation.
+		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
+	}
 	return &scrapePool{
 		appender: app,
 		config:   cfg,
-		targets:  map[model.Fingerprint]*Target{},
-		loops:    map[model.Fingerprint]loop{},
+		client:   client,
+		targets:  map[uint64]*Target{},
+		loops:    map[uint64]loop{},
 		newLoop:  newScrapeLoop,
 	}
 }
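The settable newLoop field is the test seam this refactoring leans on: the tests further down inject a fake loop constructor instead of building real scrape loops. A minimal, self-contained sketch of that pattern, with simplified stand-in types rather than the diff's real ones:

    package main

    import "fmt"

    // loop is a trimmed-down stand-in for the diff's loop interface.
    type loop interface {
    	run()
    	stop()
    }

    // fakeLoop counts how often it is started, instead of scraping anything.
    type fakeLoop struct{ started *int }

    func (l fakeLoop) run()  { *l.started++ }
    func (l fakeLoop) stop() {}

    // pool mirrors scrapePool's injectable constructor field.
    type pool struct {
    	newLoop func() loop
    }

    func main() {
    	started := 0
    	p := &pool{newLoop: func() loop { return fakeLoop{&started} }}

    	l := p.newLoop()
    	l.run()
    	fmt.Println("loops started:", started) // prints: loops started: 1
    }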
@@ -123,7 +135,13 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	sp.mtx.Lock()
 	defer sp.mtx.Unlock()

+	client, err := newHTTPClient(cfg)
+	if err != nil {
+		// Any errors that could occur here should be caught during config validation.
+		log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err)
+	}
 	sp.config = cfg
+	sp.client = client

 	var (
 		wg sync.WaitGroup
@@ -134,7 +152,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) {
 	for fp, oldLoop := range sp.loops {
 		var (
 			t       = sp.targets[fp]
-			newLoop = sp.newLoop(sp.ctx, t, sp.sampleAppender(t), sp.reportAppender(t))
+			s       = &targetScraper{Target: t, client: sp.client}
+			newLoop = sp.newLoop(sp.ctx, s, sp.sampleAppender(t), sp.reportAppender(t))
 		)
 		wg.Add(1)
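On reload each existing loop gets a replacement built against the new client and config; the swap itself (not shown in this hunk) stops the old loop and starts the new one concurrently per loop, so one slow shutdown cannot stall the whole reload. A rough sketch of that stop-then-restart pattern with hypothetical stand-in types:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type loop interface {
    	run()
    	stop()
    }

    type noopLoop struct{ name string }

    func (l noopLoop) run()  { fmt.Println(l.name, "running") }
    func (l noopLoop) stop() { fmt.Println(l.name, "stopped") }

    func main() {
    	oldLoops := []loop{noopLoop{"old-1"}, noopLoop{"old-2"}}

    	var wg sync.WaitGroup
    	for _, oldLoop := range oldLoops {
    		newLoop := noopLoop{"new"}
    		wg.Add(1)
    		// Stop the old loop and start its replacement without
    		// blocking the iteration over the remaining loops.
    		go func(oldLoop, newLoop loop) {
    			defer wg.Done()
    			oldLoop.stop()
    			newLoop.run()
    		}(oldLoop, newLoop)
    	}
    	wg.Wait()
    }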
@@ -159,20 +178,21 @@ func (sp *scrapePool) sync(targets []*Target) {
 	defer sp.mtx.Unlock()

 	var (
-		fingerprints = map[model.Fingerprint]struct{}{}
-		interval     = time.Duration(sp.config.ScrapeInterval)
-		timeout      = time.Duration(sp.config.ScrapeTimeout)
+		uniqueTargets = map[uint64]struct{}{}
+		interval      = time.Duration(sp.config.ScrapeInterval)
+		timeout       = time.Duration(sp.config.ScrapeTimeout)
 	)

 	for _, t := range targets {
-		fp := t.fingerprint()
-		fingerprints[fp] = struct{}{}
+		hash := t.hash()
+		uniqueTargets[hash] = struct{}{}

-		if _, ok := sp.targets[fp]; !ok {
-			l := sp.newLoop(sp.ctx, t, sp.sampleAppender(t), sp.reportAppender(t))
+		if _, ok := sp.targets[hash]; !ok {
+			s := &targetScraper{Target: t, client: sp.client}
+			l := sp.newLoop(sp.ctx, s, sp.sampleAppender(t), sp.reportAppender(t))

-			sp.targets[fp] = t
-			sp.loops[fp] = l
+			sp.targets[hash] = t
+			sp.loops[hash] = l

 			go l.run(interval, timeout, nil)
 		}
@@ -181,16 +201,16 @@ func (sp *scrapePool) sync(targets []*Target) {
 	var wg sync.WaitGroup

 	// Stop and remove old targets and scraper loops.
-	for fp := range sp.targets {
-		if _, ok := fingerprints[fp]; !ok {
+	for hash := range sp.targets {
+		if _, ok := uniqueTargets[hash]; !ok {
 			wg.Add(1)
 			go func(l loop) {
 				l.stop()
 				wg.Done()
-			}(sp.loops[fp])
+			}(sp.loops[hash])

-			delete(sp.loops, fp)
-			delete(sp.targets, fp)
+			delete(sp.loops, hash)
+			delete(sp.targets, hash)
 		}
 	}
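Taken together, the two sync hunks are a plain set reconciliation: the first pass records every desired target's hash in uniqueTargets and starts loops for hashes not yet running, and this second pass stops and deletes everything whose hash is absent from the new set. A generic, self-contained sketch of the pattern with stand-in values instead of real targets:

    package main

    import "fmt"

    func main() {
    	running := map[uint64]string{1: "a", 2: "b"} // currently running targets
    	desired := []uint64{2, 3}                    // hashes that should be running

    	// Pass 1: start anything desired but not running.
    	want := map[uint64]struct{}{}
    	for _, h := range desired {
    		want[h] = struct{}{}
    		if _, ok := running[h]; !ok {
    			running[h] = "started"
    			fmt.Println("start", h)
    		}
    	}
    	// Pass 2: stop anything running but no longer desired.
    	for h := range running {
    		if _, ok := want[h]; !ok {
    			delete(running, h)
    			fmt.Println("stop", h)
    		}
    	}
    }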
@@ -242,6 +262,57 @@ type scraper interface {
 	offset(interval time.Duration) time.Duration
 }

+// targetScraper implements the scraper interface for a target.
+type targetScraper struct {
+	*Target
+	client *http.Client
+}
+
+const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1`
+
+func (s *targetScraper) scrape(ctx context.Context, ts time.Time) (model.Samples, error) {
+	req, err := http.NewRequest("GET", s.URL().String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Accept", acceptHeader)
+
+	resp, err := ctxhttp.Do(ctx, s.client, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
+	}
+
+	var (
+		allSamples = make(model.Samples, 0, 200)
+		decSamples = make(model.Vector, 0, 50)
+	)
+	sdec := expfmt.SampleDecoder{
+		Dec: expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header)),
+		Opts: &expfmt.DecodeOptions{
+			Timestamp: model.TimeFromUnixNano(ts.UnixNano()),
+		},
+	}
+
+	for {
+		if err = sdec.Decode(&decSamples); err != nil {
+			break
+		}
+		allSamples = append(allSamples, decSamples...)
+		decSamples = decSamples[:0]
+	}
+
+	if err == io.EOF {
+		// Set err to nil since it is used in the scrape health recording.
+		err = nil
+	}
+	return allSamples, err
+}
+
 // A loop can run and be stopped again. It must not be reused after it was stopped.
 type loop interface {
 	run(interval, timeout time.Duration, errc chan<- error)
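The decode loop above drains the response body through expfmt.SampleDecoder until io.EOF, reusing decSamples between iterations to limit allocations. A standalone sketch of the same decoding against an in-memory text exposition, assuming the expfmt API as vendored at the time (FmtText stands in for the content-type negotiation that expfmt.ResponseFormat performs in the real code):

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    	"time"

    	"github.com/prometheus/common/expfmt"
    	"github.com/prometheus/common/model"
    )

    func main() {
    	body := strings.NewReader("metric_a 1\nmetric_b 2\n")

    	sdec := expfmt.SampleDecoder{
    		Dec: expfmt.NewDecoder(body, expfmt.FmtText),
    		Opts: &expfmt.DecodeOptions{
    			Timestamp: model.TimeFromUnixNano(time.Now().UnixNano()),
    		},
    	}

    	var (
    		all model.Samples
    		dec = make(model.Vector, 0, 50)
    		err error
    	)
    	for {
    		if err = sdec.Decode(&dec); err != nil {
    			break
    		}
    		all = append(all, dec...)
    		dec = dec[:0] // reuse the backing array across iterations
    	}
    	if err != io.EOF {
    		fmt.Println("decode error:", err)
    		return
    	}
    	fmt.Println(all) // two samples: metric_a=1, metric_b=2
    }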
retrieval/scrape_test.go

@@ -15,7 +15,11 @@ package retrieval

 import (
 	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
 	"reflect"
+	"strings"
 	"sync"
 	"testing"
 	"time"
@@ -60,11 +64,11 @@ func (l *testLoop) stop() {

 func TestScrapePoolStop(t *testing.T) {
 	sp := &scrapePool{
-		targets: map[model.Fingerprint]*Target{},
-		loops:   map[model.Fingerprint]loop{},
+		targets: map[uint64]*Target{},
+		loops:   map[uint64]loop{},
 	}
 	var mtx sync.Mutex
-	stopped := map[model.Fingerprint]bool{}
+	stopped := map[uint64]bool{}
 	numTargets := 20

 	// Stopping the scrape pool must call stop() on all scrape loops,
@@ -82,12 +86,12 @@ func TestScrapePoolStop(t *testing.T) {
 			time.Sleep(time.Duration(i*20) * time.Millisecond)

 			mtx.Lock()
-			stopped[t.fingerprint()] = true
+			stopped[t.hash()] = true
 			mtx.Unlock()
 		}

-		sp.targets[t.fingerprint()] = t
-		sp.loops[t.fingerprint()] = l
+		sp.targets[t.hash()] = t
+		sp.loops[t.hash()] = l
 	}

 	done := make(chan struct{})
@@ -126,7 +130,7 @@ func TestScrapePoolReload(t *testing.T) {
 	var mtx sync.Mutex
 	numTargets := 20

-	stopped := map[model.Fingerprint]bool{}
+	stopped := map[uint64]bool{}

 	reloadCfg := &config.ScrapeConfig{
 		ScrapeInterval: model.Duration(3 * time.Second),
@@ -144,16 +148,16 @@ func TestScrapePoolReload(t *testing.T) {
 				t.Errorf("Expected scrape timeout %d but got %d", 2*time.Second, timeout)
 			}
 			mtx.Lock()
-			if !stopped[s.(*Target).fingerprint()] {
-				t.Errorf("Scrape loop for %v not stopped yet", s.(*Target))
+			if !stopped[s.(*targetScraper).hash()] {
+				t.Errorf("Scrape loop for %v not stopped yet", s.(*targetScraper))
 			}
 			mtx.Unlock()
 		}
 		return l
 	}
 	sp := &scrapePool{
-		targets: map[model.Fingerprint]*Target{},
-		loops:   map[model.Fingerprint]loop{},
+		targets: map[uint64]*Target{},
+		loops:   map[uint64]loop{},
 		newLoop: newLoop,
 	}
@@ -172,18 +176,18 @@ func TestScrapePoolReload(t *testing.T) {
 			time.Sleep(time.Duration(i*20) * time.Millisecond)

 			mtx.Lock()
-			stopped[t.fingerprint()] = true
+			stopped[t.hash()] = true
 			mtx.Unlock()
 		}

-		sp.targets[t.fingerprint()] = t
-		sp.loops[t.fingerprint()] = l
+		sp.targets[t.hash()] = t
+		sp.loops[t.hash()] = l
 	}
 	done := make(chan struct{})

-	beforeTargets := map[model.Fingerprint]*Target{}
-	for fp, t := range sp.targets {
-		beforeTargets[fp] = t
+	beforeTargets := map[uint64]*Target{}
+	for h, t := range sp.targets {
+		beforeTargets[h] = t
 	}

 	reloadTime := time.Now()
@@ -423,6 +427,134 @@ func TestScrapeLoopRun(t *testing.T) {
 	}
 }

+func TestTargetScraperScrapeOK(t *testing.T) {
+	server := httptest.NewServer(
+		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
+			w.Write([]byte("metric_a 1\nmetric_b 2\n"))
+		}),
+	)
+	defer server.Close()
+
+	serverURL, err := url.Parse(server.URL)
+	if err != nil {
+		panic(err)
+	}
+
+	ts := &targetScraper{
+		Target: &Target{
+			labels: model.LabelSet{
+				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
+				model.AddressLabel: model.LabelValue(serverURL.Host),
+			},
+		},
+		client: http.DefaultClient,
+	}
+	now := time.Now()
+
+	samples, err := ts.scrape(context.Background(), now)
+	if err != nil {
+		t.Fatalf("Unexpected scrape error: %s", err)
+	}
+
+	expectedSamples := model.Samples{
+		{
+			Metric:    model.Metric{"__name__": "metric_a"},
+			Timestamp: model.TimeFromUnixNano(now.UnixNano()),
+			Value:     1,
+		},
+		{
+			Metric:    model.Metric{"__name__": "metric_b"},
+			Timestamp: model.TimeFromUnixNano(now.UnixNano()),
+			Value:     2,
+		},
+	}
+
+	if !reflect.DeepEqual(samples, expectedSamples) {
+		t.Errorf("Scraped samples did not match served metrics")
+		t.Errorf("Expected: %v", expectedSamples)
+		t.Fatalf("Got: %v", samples)
+	}
+}
+
+func TestTargetScrapeScrapeCancel(t *testing.T) {
+	block := make(chan struct{})
+
+	server := httptest.NewServer(
+		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			<-block
+		}),
+	)
+	defer server.Close()
+
+	serverURL, err := url.Parse(server.URL)
+	if err != nil {
+		panic(err)
+	}
+
+	ts := &targetScraper{
+		Target: &Target{
+			labels: model.LabelSet{
+				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
+				model.AddressLabel: model.LabelValue(serverURL.Host),
+			},
+		},
+		client: http.DefaultClient,
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+
+	done := make(chan struct{})
+
+	go func() {
+		time.Sleep(1 * time.Second)
+		cancel()
+	}()
+
+	go func() {
+		if _, err := ts.scrape(ctx, time.Now()); err != context.Canceled {
+			t.Fatalf("Expected context cancelation error but got: %s", err)
+		}
+		close(done)
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Scrape function did not return unexpectedly")
+	case <-done:
+	}
+	// If this is closed in a defer above the function the test server
+	// does not terminate and the test doesn't complete.
+	close(block)
+}
+
+func TestTargetScrapeScrapeNotFound(t *testing.T) {
+	server := httptest.NewServer(
+		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusNotFound)
+		}),
+	)
+	defer server.Close()
+
+	serverURL, err := url.Parse(server.URL)
+	if err != nil {
+		panic(err)
+	}
+
+	ts := &targetScraper{
+		Target: &Target{
+			labels: model.LabelSet{
+				model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
+				model.AddressLabel: model.LabelValue(serverURL.Host),
+			},
+		},
+		client: http.DefaultClient,
+	}
+
+	if _, err := ts.scrape(context.Background(), time.Now()); !strings.Contains(err.Error(), "404") {
+		t.Fatalf("Expected \"404 NotFound\" error but got: %s", err)
+	}
+}
+
 // testScraper implements the scraper interface and allows setting values
 // returned by its methods. It also allows setting a custom scrape function.
 type testScraper struct {
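TestTargetScrapeScrapeCancel above relies on ctxhttp aborting the in-flight request as soon as the context is canceled, returning context.Canceled. A minimal sketch of that behavior outside the test harness, using the same vendored packages the scraper uses (ctxhttp.Get is a convenience wrapper around the ctxhttp.Do call in scrape.go):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    	"time"

    	"golang.org/x/net/context"
    	"golang.org/x/net/context/ctxhttp"
    )

    func main() {
    	// A server that never responds, like the one in the test.
    	block := make(chan struct{})
    	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		<-block
    	}))
    	defer server.Close()
    	defer close(block) // runs before server.Close, unblocking the handler

    	ctx, cancel := context.WithCancel(context.Background())
    	go func() {
    		time.Sleep(100 * time.Millisecond)
    		cancel()
    	}()

    	_, err := ctxhttp.Get(ctx, http.DefaultClient, server.URL)
    	fmt.Println(err == context.Canceled) // true: the request was aborted
    }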
retrieval/target.go

@@ -15,7 +15,7 @@ package retrieval

 import (
 	"fmt"
-	"io"
+	"hash/fnv"
 	"io/ioutil"
 	"net/http"
 	"net/url"
@@ -23,10 +23,7 @@ import (
 	"sync"
 	"time"

-	"github.com/prometheus/common/expfmt"
 	"github.com/prometheus/common/model"
-	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/storage"
@@ -34,127 +31,38 @@ import (
 )

 // TargetHealth describes the health state of a target.
-type TargetHealth int
-
-func (t TargetHealth) String() string {
-	switch t {
-	case HealthUnknown:
-		return "unknown"
-	case HealthGood:
-		return "up"
-	case HealthBad:
-		return "down"
-	}
-	panic("unknown state")
-}
-
-func (t TargetHealth) value() model.SampleValue {
-	if t == HealthGood {
-		return 1
-	}
-	return 0
-}
+type TargetHealth string

 // The possible health states of a target based on the last performed scrape.
 const (
-	// HealthUnknown is the state of a Target before it is first scraped.
-	HealthUnknown TargetHealth = iota
-	// HealthGood is the state of a Target that has been successfully scraped.
-	HealthGood
-	// HealthBad is the state of a Target that was scraped unsuccessfully.
-	HealthBad
+	HealthUnknown TargetHealth = "unknown"
+	HealthGood    TargetHealth = "up"
+	HealthBad     TargetHealth = "down"
 )

-// TargetStatus contains information about the current status of a scrape target.
-type TargetStatus struct {
-	lastError  error
-	lastScrape time.Time
-	health     TargetHealth
-
-	mu sync.RWMutex
-}
-
-// LastError returns the error encountered during the last scrape.
-func (ts *TargetStatus) LastError() error {
-	ts.mu.RLock()
-	defer ts.mu.RUnlock()
-
-	return ts.lastError
-}
-
-// LastScrape returns the time of the last scrape.
-func (ts *TargetStatus) LastScrape() time.Time {
-	ts.mu.RLock()
-	defer ts.mu.RUnlock()
-
-	return ts.lastScrape
-}
-
-// Health returns the last known health state of the target.
-func (ts *TargetStatus) Health() TargetHealth {
-	ts.mu.RLock()
-	defer ts.mu.RUnlock()
-
-	return ts.health
-}
-
-func (ts *TargetStatus) setLastScrape(t time.Time) {
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-
-	ts.lastScrape = t
-}
-
-func (ts *TargetStatus) setLastError(err error) {
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-
-	if err == nil {
-		ts.health = HealthGood
-	} else {
-		ts.health = HealthBad
-	}
-	ts.lastError = err
-}
-
 // Target refers to a singular HTTP or HTTPS endpoint.
 type Target struct {
-	// The status object for the target. It is only set once on initialization.
-	status *TargetStatus
-
-	scrapeLoop   *scrapeLoop
-	scrapeConfig *config.ScrapeConfig
-
-	// Mutex protects the members below.
-	sync.RWMutex
-
 	// Labels before any processing.
 	metaLabels model.LabelSet
 	// Any labels that are added to this target and its metrics.
 	labels model.LabelSet
+	// Additional URL parameters that are part of the target URL.
+	params url.Values

-	// The HTTP client used to scrape the target's endpoint.
-	httpClient *http.Client
+	mtx        sync.RWMutex
+	lastError  error
+	lastScrape time.Time
+	health     TargetHealth
 }

 // NewTarget creates a reasonably configured target for querying.
-func NewTarget(cfg *config.ScrapeConfig, labels, metaLabels model.LabelSet) (*Target, error) {
-	client, err := newHTTPClient(cfg)
-	if err != nil {
-		return nil, err
-	}
-	t := &Target{
-		status:       &TargetStatus{},
-		scrapeConfig: cfg,
-		labels:       labels,
-		metaLabels:   metaLabels,
-		httpClient:   client,
-	}
-	return t, nil
-}
-
-// Status returns the status of the target.
-func (t *Target) Status() *TargetStatus {
-	return t.status
+func NewTarget(labels, metaLabels model.LabelSet, params url.Values) *Target {
+	return &Target{
+		labels:     labels,
+		metaLabels: metaLabels,
+		params:     params,
+		health:     HealthUnknown,
+	}
 }

 func newHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) {
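Making TargetHealth a string type removes the String()/value() boilerplate: the constants render directly in formatted output and in templates (see the status.html change at the end of this diff). A tiny illustration with a mirrored type, not the real package:

    package main

    import "fmt"

    type TargetHealth string

    const (
    	HealthUnknown TargetHealth = "unknown"
    	HealthGood    TargetHealth = "up"
    	HealthBad     TargetHealth = "down"
    )

    func main() {
    	h := HealthGood
    	// No String() method needed; %s and templates print the value directly.
    	fmt.Printf("state_indicator: %s\n", h) // state_indicator: up
    }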
@@ -202,15 +110,16 @@ func newHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) {
 }

 func (t *Target) String() string {
-	return t.host()
+	return t.URL().String()
 }

-// fingerprint returns an identifying hash for the target.
-func (t *Target) fingerprint() model.Fingerprint {
-	t.RLock()
-	defer t.RUnlock()
+// hash returns an identifying hash for the target.
+func (t *Target) hash() uint64 {
+	h := fnv.New64a()
+	h.Write([]byte(t.labels.Fingerprint().String()))
+	h.Write([]byte(t.URL().String()))

-	return t.labels.Fingerprint()
+	return h.Sum64()
 }

 // offset returns the time until the next scrape cycle for the target.
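hash folds both the label fingerprint and the final URL into one FNV-1a value, so two targets that share a label set but differ in URL parameters (which live outside the labels after this change) still get distinct identities. A standalone sketch, assuming the vendored prometheus/common/model package:

    package main

    import (
    	"fmt"
    	"hash/fnv"

    	"github.com/prometheus/common/model"
    )

    func hashTarget(labels model.LabelSet, url string) uint64 {
    	h := fnv.New64a()
    	h.Write([]byte(labels.Fingerprint().String()))
    	h.Write([]byte(url))
    	return h.Sum64()
    }

    func main() {
    	labels := model.LabelSet{model.AddressLabel: "example.com:9090"}

    	a := hashTarget(labels, "http://example.com:9090/metrics?x=1")
    	b := hashTarget(labels, "http://example.com:9090/metrics?x=2")
    	fmt.Println(a != b) // true: same labels, different URLs, different hashes
    }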
@@ -219,7 +128,7 @@ func (t *Target) offset(interval time.Duration) time.Duration {

 	var (
 		base   = now % int64(interval)
-		offset = uint64(t.fingerprint()) % uint64(interval)
+		offset = t.hash() % uint64(interval)
 		next   = base + int64(offset)
 	)
@@ -229,35 +138,27 @@ func (t *Target) offset(interval time.Duration) time.Duration {
 	return time.Duration(next)
 }

-func (t *Target) scheme() string {
-	t.RLock()
-	defer t.RUnlock()
-
-	return string(t.labels[model.SchemeLabel])
-}
-
-func (t *Target) host() string {
-	t.RLock()
-	defer t.RUnlock()
-
-	return string(t.labels[model.AddressLabel])
-}
-
-func (t *Target) path() string {
-	t.RLock()
-	defer t.RUnlock()
-
-	return string(t.labels[model.MetricsPathLabel])
+// Labels returns a copy of the set of all public labels of the target.
+func (t *Target) Labels() model.LabelSet {
+	lset := make(model.LabelSet, len(t.labels))
+	for ln, lv := range t.labels {
+		if !strings.HasPrefix(string(ln), model.ReservedLabelPrefix) {
+			lset[ln] = lv
+		}
+	}
+	return lset
+}
+
+// MetaLabels returns a copy of the target's labels before any processing.
+func (t *Target) MetaLabels() model.LabelSet {
+	return t.metaLabels.Clone()
 }

 // URL returns a copy of the target's URL.
 func (t *Target) URL() *url.URL {
-	t.RLock()
-	defer t.RUnlock()
-
 	params := url.Values{}

-	for k, v := range t.scrapeConfig.Params {
+	for k, v := range t.params {
 		params[k] = make([]string, len(v))
 		copy(params[k], v)
 	}
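The new Labels filters in place: anything starting with the reserved "__" prefix (model.ReservedLabelPrefix) is internal and dropped from the public view. A short sketch of the same filtering against the vendored model package:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/prometheus/common/model"
    )

    func main() {
    	labels := model.LabelSet{
    		model.AddressLabel: "example.com:9090", // reserved: starts with __
    		"job":              "node",
    		"env":              "prod",
    	}

    	public := make(model.LabelSet, len(labels))
    	for ln, lv := range labels {
    		if !strings.HasPrefix(string(ln), model.ReservedLabelPrefix) {
    			public[ln] = lv
    		}
    	}
    	fmt.Println(public) // only job and env remain
    }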
@@ -282,63 +183,42 @@ func (t *Target) URL() *url.URL {
 	}
 }

-// InstanceIdentifier returns the identifier for the target.
-func (t *Target) InstanceIdentifier() string {
-	return t.host()
-}
-
-const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1`
-
-func (t *Target) scrape(ctx context.Context, ts time.Time) (model.Samples, error) {
-	t.RLock()
-	client := t.httpClient
-	t.RUnlock()
-
-	req, err := http.NewRequest("GET", t.URL().String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Add("Accept", acceptHeader)
-
-	resp, err := ctxhttp.Do(ctx, client, req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
-	}
-
-	var (
-		allSamples = make(model.Samples, 0, 200)
-		decSamples = make(model.Vector, 0, 50)
-	)
-	sdec := expfmt.SampleDecoder{
-		Dec: expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header)),
-		Opts: &expfmt.DecodeOptions{
-			Timestamp: model.TimeFromUnixNano(ts.UnixNano()),
-		},
-	}
-
-	for {
-		if err = sdec.Decode(&decSamples); err != nil {
-			break
-		}
-		allSamples = append(allSamples, decSamples...)
-		decSamples = decSamples[:0]
-	}
-
-	if err == io.EOF {
-		// Set err to nil since it is used in the scrape health recording.
-		err = nil
-	}
-	return allSamples, err
-}
-
 func (t *Target) report(start time.Time, dur time.Duration, err error) {
-	t.status.setLastError(err)
-	t.status.setLastScrape(start)
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+
+	if err == nil {
+		t.health = HealthGood
+	} else {
+		t.health = HealthBad
+	}
+
+	t.lastError = err
+	t.lastScrape = start
+}
+
+// LastError returns the error encountered during the last scrape.
+func (t *Target) LastError() error {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	return t.lastError
+}
+
+// LastScrape returns the time of the last scrape.
+func (t *Target) LastScrape() time.Time {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	return t.lastScrape
+}
+
+// Health returns the last known health state of the target.
+func (t *Target) Health() TargetHealth {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	return t.health
 }

 // Merges the ingested sample's metric with the label set. On a collision the
@@ -397,36 +277,3 @@ func (app relabelAppender) Append(s *model.Sample) error {

 	return app.SampleAppender.Append(s)
 }
-
-// Labels returns a copy of the set of all public labels of the target.
-func (t *Target) Labels() model.LabelSet {
-	t.RLock()
-	defer t.RUnlock()
-
-	return t.unlockedLabels()
-}
-
-// unlockedLabels does the same as Labels but does not lock the mutex (useful
-// for internal usage when the mutex is already locked).
-func (t *Target) unlockedLabels() model.LabelSet {
-	lset := make(model.LabelSet, len(t.labels))
-	for ln, lv := range t.labels {
-		if !strings.HasPrefix(string(ln), model.ReservedLabelPrefix) {
-			lset[ln] = lv
-		}
-	}
-
-	if _, ok := lset[model.InstanceLabel]; !ok {
-		lset[model.InstanceLabel] = t.labels[model.AddressLabel]
-	}
-
-	return lset
-}
-
-// MetaLabels returns a copy of the target's labels before any processing.
-func (t *Target) MetaLabels() model.LabelSet {
-	t.RLock()
-	defer t.RUnlock()
-
-	return t.metaLabels.Clone()
-}
retrieval/target_test.go

@@ -16,7 +16,6 @@ package retrieval
 import (
 	"crypto/tls"
 	"crypto/x509"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -28,7 +27,6 @@ import (
 	"time"

 	"github.com/prometheus/common/model"
-	"golang.org/x/net/context"

 	"github.com/prometheus/prometheus/config"
 )
@@ -36,9 +34,8 @@ import (
 func TestTargetLabels(t *testing.T) {
 	target := newTestTarget("example.com:80", 0, model.LabelSet{"job": "some_job", "foo": "bar"})
 	want := model.LabelSet{
-		model.JobLabel:      "some_job",
-		model.InstanceLabel: "example.com:80",
-		"foo":               "bar",
+		model.JobLabel: "some_job",
+		"foo":          "bar",
 	}
 	got := target.Labels()
 	if !reflect.DeepEqual(want, got) {
@@ -92,69 +89,36 @@ func TestTargetOffset(t *testing.T) {
 	}
 }

-func TestTargetScrape404(t *testing.T) {
-	server := httptest.NewServer(
-		http.HandlerFunc(
-			func(w http.ResponseWriter, r *http.Request) {
-				w.WriteHeader(http.StatusNotFound)
-			},
-		),
-	)
-	defer server.Close()
-
-	testTarget := newTestTarget(server.URL, time.Second, model.LabelSet{})
-
-	want := errors.New("server returned HTTP status 404 Not Found")
-	_, got := testTarget.scrape(context.Background(), time.Now())
-	if got == nil || want.Error() != got.Error() {
-		t.Fatalf("want err %q, got %q", want, got)
-	}
-}
-
-func TestURLParams(t *testing.T) {
-	server := httptest.NewServer(
-		http.HandlerFunc(
-			func(w http.ResponseWriter, r *http.Request) {
-				w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
-				w.Write([]byte{})
-				r.ParseForm()
-				if r.Form["foo"][0] != "bar" {
-					t.Fatalf("URL parameter 'foo' had unexpected first value '%v'", r.Form["foo"][0])
-				}
-				if r.Form["foo"][1] != "baz" {
-					t.Fatalf("URL parameter 'foo' had unexpected second value '%v'", r.Form["foo"][1])
-				}
-			},
-		),
-	)
-	defer server.Close()
-	serverURL, err := url.Parse(server.URL)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	target, err := NewTarget(
-		&config.ScrapeConfig{
-			JobName:        "test_job1",
-			ScrapeInterval: model.Duration(1 * time.Minute),
-			ScrapeTimeout:  model.Duration(1 * time.Second),
-			Scheme:         serverURL.Scheme,
-			Params: url.Values{
-				"foo": []string{"bar", "baz"},
-			},
-		},
-		model.LabelSet{
-			model.SchemeLabel:  model.LabelValue(serverURL.Scheme),
-			model.AddressLabel: model.LabelValue(serverURL.Host),
-			"__param_foo":      "bar",
-		},
-		nil,
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err = target.scrape(context.Background(), time.Now()); err != nil {
-		t.Fatal(err)
-	}
+func TestTargetURL(t *testing.T) {
+	params := url.Values{
+		"abc": []string{"foo", "bar", "baz"},
+		"xyz": []string{"hoo"},
+	}
+	labels := model.LabelSet{
+		model.AddressLabel:     "example.com:1234",
+		model.SchemeLabel:      "https",
+		model.MetricsPathLabel: "/metricz",
+		"__param_abc":          "overwrite",
+		"__param_cde":          "huu",
+	}
+	target := NewTarget(labels, labels, params)
+
+	// The reserved labels are concatenated into a full URL. The first value for each
+	// URL query parameter can be set/modified via labels as well.
+	expectedParams := url.Values{
+		"abc": []string{"overwrite", "bar", "baz"},
+		"cde": []string{"huu"},
+		"xyz": []string{"hoo"},
+	}
+	expectedURL := url.URL{
+		Scheme:   "https",
+		Host:     "example.com:1234",
+		Path:     "/metricz",
+		RawQuery: expectedParams.Encode(),
+	}
+
+	if u := target.URL(); !reflect.DeepEqual(u.String(), expectedURL.String()) {
+		t.Fatalf("Expected URL %q but got %q", expectedURL, u)
+	}
 }
@@ -165,12 +129,7 @@ func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelS
 	labels[model.MetricsPathLabel] = "/metrics"

 	return &Target{
-		scrapeConfig: &config.ScrapeConfig{
-			ScrapeInterval: model.Duration(time.Millisecond),
-			ScrapeTimeout:  model.Duration(deadline),
-		},
-		labels: labels,
-		status: &TargetStatus{},
+		labels: labels,
 	}
 }
@@ -343,7 +302,7 @@ func newTLSConfig(t *testing.T) *tls.Config {
 	return tlsConfig
 }

-func TestNewTargetWithBadTLSConfig(t *testing.T) {
+func TestNewClientWithBadTLSConfig(t *testing.T) {
 	cfg := &config.ScrapeConfig{
 		ScrapeTimeout: model.Duration(1 * time.Second),
 		TLSConfig: config.TLSConfig{
@@ -352,7 +311,7 @@ func TestNewTargetWithBadTLSConfig(t *testing.T) {
 			KeyFile:  "testdata/nonexistent_client.key",
 		},
 	}
-	_, err := NewTarget(cfg, nil, nil)
+	_, err := newHTTPClient(cfg)
 	if err == nil {
 		t.Fatalf("Expected error, got nil.")
 	}
retrieval/targetmanager.go

@@ -176,7 +176,7 @@ type targetSet struct {
 	mtx sync.RWMutex

 	// Sets of targets by a source string that is unique across target providers.
-	tgroups   map[string]map[model.Fingerprint]*Target
+	tgroups   map[string][]*Target
 	providers map[string]TargetProvider

 	scrapePool *scrapePool
@@ -189,7 +189,7 @@ type targetSet struct {

 func newTargetSet(cfg *config.ScrapeConfig, app storage.SampleAppender) *targetSet {
 	ts := &targetSet{
-		tgroups:    map[string]map[model.Fingerprint]*Target{},
+		tgroups:    map[string][]*Target{},
 		scrapePool: newScrapePool(cfg, app),
 		syncCh:     make(chan struct{}, 1),
 		config:     cfg,
@@ -247,13 +247,11 @@ Loop:
 }

 func (ts *targetSet) sync() {
-	targets := []*Target{}
-	for _, tgroup := range ts.tgroups {
-		for _, t := range tgroup {
-			targets = append(targets, t)
-		}
+	var all []*Target
+	for _, targets := range ts.tgroups {
+		all = append(all, targets...)
 	}
-	ts.scrapePool.sync(targets)
+	ts.scrapePool.sync(all)
 }

 func (ts *targetSet) runProviders(ctx context.Context, providers map[string]TargetProvider) {
@@ -394,8 +392,8 @@ func providersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider {
 }

 // targetsFromGroup builds targets based on the given TargetGroup and config.
-func targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) (map[model.Fingerprint]*Target, error) {
-	targets := make(map[model.Fingerprint]*Target, len(tg.Targets))
+func targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) ([]*Target, error) {
+	targets := make([]*Target, 0, len(tg.Targets))

 	for i, labels := range tg.Targets {
 		for k, v := range cfg.Params {
@@ -459,12 +457,12 @@ func targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) (map[mod
 				delete(labels, ln)
 			}
 		}
-		tr, err := NewTarget(cfg, labels, preRelabelLabels)
-		if err != nil {
-			return nil, fmt.Errorf("error while creating instance %d in target group %s: %s", i, tg, err)
+
+		if _, ok := labels[model.InstanceLabel]; !ok {
+			labels[model.InstanceLabel] = labels[model.AddressLabel]
 		}

-		targets[tr.fingerprint()] = tr
+		targets = append(targets, NewTarget(labels, preRelabelLabels, cfg.Params))
 	}

 	return targets, nil
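With NewTarget no longer deriving an instance identifier itself, targetsFromGroup now defaults the instance label to the address before constructing the target. A small sketch of that defaulting step against the vendored model package:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func main() {
    	labels := model.LabelSet{model.AddressLabel: "example.com:9090"}

    	// Same check as in the hunk above: only set instance if the user
    	// or relabeling did not already provide one.
    	if _, ok := labels[model.InstanceLabel]; !ok {
    		labels[model.InstanceLabel] = labels[model.AddressLabel]
    	}
    	fmt.Println(labels[model.InstanceLabel]) // example.com:9090
    }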
web/ui/bindata.go

@@ -159,7 +159,7 @@ func webUiTemplatesGraphHtml() (*asset, error) {
 	return a, nil
 }

-var _webUiTemplatesStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\xcd\x6e\xdc\x36\x10\xbe\xef\x53\xb0\x44\x8e\xd5\x2e\x10\xa0\x17\x63\x57\x07\x1b\x29\x1c\xc0\x29\xdc\xac\x7d\xe9\x25\xe0\x8a\xb3\x12\x5b\x9a\x14\x48\xca\xb8\x62\x6c\x2b\xd5\x23…")
+var _webUiTemplatesStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\xcd\x6e\xdc\x36\x10\xbe\xef\x53\xb0\x44\x8e\xd5\x2e\x10\xa0\x17\x63\x57\x07\x1b\x29\x1c\xc0\x29\xdc\xac\x7d\xe9\x25\xe0\x8a\xb3\x12\x5b\x9a\x14\x48\xca\xf5\x42\xd1\xbb\x77\x86\x92\xa2…")
 (both generated byte slices hold the gzipped contents of web/ui/templates/status.html; the full hex payloads are elided here for readability)

 func webUiTemplatesStatusHtmlBytes() ([]byte, error) {
 	return bindataRead(
@@ -174,7 +174,7 @@ func webUiTemplatesStatusHtml() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "web/ui/templates/status.html", size: 3307, mode: os.FileMode(420), modTime: time.Unix(1455530985, 0)}
+	info := bindataFileInfo{name: "web/ui/templates/status.html", size: 3265, mode: os.FileMode(420), modTime: time.Unix(1456687049, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
web/ui/templates/status.html

@@ -55,8 +55,8 @@
 	{{end}}
 </td>
 <td>
-	<span class="alert alert-{{ .Status.Health | healthToClass }} state_indicator text-uppercase">
-		{{.Status.Health}}
+	<span class="alert alert-{{ .Health | healthToClass }} state_indicator text-uppercase">
+		{{.Health}}
 	</span>
 </td>
 <td>
@@ -70,11 +70,11 @@
 	</span>
 </td>
 <td>
-	{{if .Status.LastScrape.IsZero}}Never{{else}}{{since .Status.LastScrape}} ago{{end}}
+	{{if .LastScrape.IsZero}}Never{{else}}{{since .LastScrape}} ago{{end}}
 </td>
 <td>
-	{{if .Status.LastError}}
-		<span class="alert alert-danger state_indicator">{{.Status.LastError}}</span>
+	{{if .LastError}}
+		<span class="alert alert-danger state_indicator">{{.LastError}}</span>
 	{{end}}
 </td>
 </tr>