Merge "Make scrape timeouts configurable per job."

This commit is contained in:
Julius Volz 2013-08-19 22:20:37 +02:00 committed by Gerrit Code Review
commit e26d3437fd
6 changed files with 44 additions and 55 deletions

View file

@ -79,6 +79,9 @@ func (c Config) Validate() error {
if _, err := utility.StringToDuration(job.GetSdRefreshInterval()); err != nil {
return fmt.Errorf("Invalid SD refresh interval for job '%s': %s", job.GetName(), err)
}
if _, err := utility.StringToDuration(job.GetScrapeTimeout()); err != nil {
return fmt.Errorf("Invalid scrape timeout for job '%s': %s", job.GetName(), err)
}
for _, targetGroup := range job.TargetGroup {
if err := c.validateLabels(targetGroup.Labels); err != nil {
return fmt.Errorf("Invalid labels for job '%s': %s", job.GetName(), err)
@ -150,3 +153,8 @@ type JobConfig struct {
// ScrapeInterval gets the scrape interval for a job.
func (c JobConfig) ScrapeInterval() time.Duration {
	interval := c.GetScrapeInterval()
	return stringToDuration(interval)
}
// ScrapeTimeout gets the per-target scrape timeout for a job.
//
// Bug fix: the previous version returned the scrape *interval*
// (c.GetScrapeInterval()), so the configured scrape_timeout — the very
// setting this commit makes configurable — was silently ignored. The
// stale comment ("EvaluationInterval gets the scrape interval") is
// corrected as well.
func (c JobConfig) ScrapeTimeout() time.Duration {
	return stringToDuration(c.GetScrapeTimeout())
}

View file

@ -49,12 +49,18 @@ message TargetGroup {
}
// The configuration for a Prometheus job to scrape.
//
// The next field no. is 8.
message JobConfig {
// The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*".
required string name = 1;
// How frequently to scrape targets from this job. Overrides the global
// default.
// default. Must be a valid Prometheus duration string in the form
// "[0-9]+[smhdwy]".
optional string scrape_interval = 2;
// Per-target timeout when scraping this job. Must be a valid Prometheus
// duration string in the form "[0-9]+[smhdwy]".
optional string scrape_timeout = 7 [default = "10s"];
// The DNS-SD service name pointing to SRV records containing endpoint
// information for a job. When this field is provided, no target_group
// elements may be set.

View file

@ -13,11 +13,8 @@ var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf
// A label/value pair suitable for attaching to timeseries.
// NOTE(review): this span is a rendered diff without +/- markers; the struct
// below shows both the removed and the added form of the Name field (declared
// twice), so it is not valid Go as printed — confirm against the post-commit
// file before relying on it.
type LabelPair struct {
// The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*".
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The value of the label. May contain any characters.
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -40,7 +37,6 @@ func (m *LabelPair) GetValue() string {
return ""
}
// A set of label/value pairs.
type LabelPairs struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
XXX_unrecognized []byte `json:"-"`
@ -57,19 +53,12 @@ func (m *LabelPairs) GetLabel() []*LabelPair {
return nil
}
// The global Prometheus configuration section.
// NOTE(review): rendered diff residue — the commented field declarations and
// the compact block at the end are the removed and added versions of the same
// fields (each is declared twice), so this struct is not valid Go as printed.
// The post-commit file keeps only one set; verify there.
type GlobalConfig struct {
// How frequently to scrape targets by default. Must be a valid Prometheus
// duration string in the form "[0-9]+[smhdwy]".
ScrapeInterval *string `protobuf:"bytes,1,opt,name=scrape_interval,def=1m" json:"scrape_interval,omitempty"`
// How frequently to evaluate rules by default. Must be a valid Prometheus
// duration string in the form "[0-9]+[smhdwy]".
EvaluationInterval *string `protobuf:"bytes,2,opt,name=evaluation_interval,def=1m" json:"evaluation_interval,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
Labels *LabelPairs `protobuf:"bytes,3,opt,name=labels" json:"labels,omitempty"`
// The list of file names of rule files to load.
RuleFile []string `protobuf:"bytes,4,rep,name=rule_file" json:"rule_file,omitempty"`
XXX_unrecognized []byte `json:"-"`
ScrapeInterval *string `protobuf:"bytes,1,opt,name=scrape_interval,def=1m" json:"scrape_interval,omitempty"`
EvaluationInterval *string `protobuf:"bytes,2,opt,name=evaluation_interval,def=1m" json:"evaluation_interval,omitempty"`
Labels *LabelPairs `protobuf:"bytes,3,opt,name=labels" json:"labels,omitempty"`
RuleFile []string `protobuf:"bytes,4,rep,name=rule_file" json:"rule_file,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *GlobalConfig) Reset() { *m = GlobalConfig{} }
@ -107,11 +96,8 @@ func (m *GlobalConfig) GetRuleFile() []string {
return nil
}
// A labeled group of targets to scrape for a job.
// NOTE(review): rendered diff residue — the Target field appears twice
// (removed and added line both shown), so this struct is not valid Go as
// printed; confirm against the post-commit file.
type TargetGroup struct {
// The list of endpoints to scrape via HTTP.
Target []string `protobuf:"bytes,1,rep,name=target" json:"target,omitempty"`
// The labels to add to any timeseries scraped for this target group.
Target []string `protobuf:"bytes,1,rep,name=target" json:"target,omitempty"`
Labels *LabelPairs `protobuf:"bytes,2,opt,name=labels" json:"labels,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -134,32 +120,22 @@ func (m *TargetGroup) GetLabels() *LabelPairs {
return nil
}
// The configuration for a Prometheus job to scrape.
// NOTE(review): rendered diff residue — the commented declarations and the
// compact block at the end are the removed and added versions of the same
// fields (each declared twice), so this struct is not valid Go as printed.
// The added ScrapeTimeout field (tag 7, default "10s") is the substantive
// change of this commit; verify against the post-commit file.
type JobConfig struct {
// The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*".
Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
// How frequently to scrape targets from this job. Overrides the global
// default.
ScrapeInterval *string `protobuf:"bytes,2,opt,name=scrape_interval" json:"scrape_interval,omitempty"`
// The DNS-SD service name pointing to SRV records containing endpoint
// information for a job. When this field is provided, no target_group
// elements may be set.
SdName *string `protobuf:"bytes,3,opt,name=sd_name" json:"sd_name,omitempty"`
// Discovery refresh period when using DNS-SD to discover targets. Must be a
// valid Prometheus duration string in the form "[0-9]+[smhdwy]".
SdRefreshInterval *string `protobuf:"bytes,4,opt,name=sd_refresh_interval,def=30s" json:"sd_refresh_interval,omitempty"`
// List of labeled target groups for this job. Only legal when DNS-SD isn't
// used for a job.
TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"`
// The HTTP resource path to fetch metrics from on targets.
MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics.json" json:"metrics_path,omitempty"`
XXX_unrecognized []byte `json:"-"`
Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
ScrapeInterval *string `protobuf:"bytes,2,opt,name=scrape_interval" json:"scrape_interval,omitempty"`
ScrapeTimeout *string `protobuf:"bytes,7,opt,name=scrape_timeout,def=10s" json:"scrape_timeout,omitempty"`
SdName *string `protobuf:"bytes,3,opt,name=sd_name" json:"sd_name,omitempty"`
SdRefreshInterval *string `protobuf:"bytes,4,opt,name=sd_refresh_interval,def=30s" json:"sd_refresh_interval,omitempty"`
TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"`
MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics.json" json:"metrics_path,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
// Generated protobuf boilerplate for JobConfig; do not hand-edit.
func (m *JobConfig) Reset() { *m = JobConfig{} }
func (m *JobConfig) String() string { return proto.CompactTextString(m) }
func (*JobConfig) ProtoMessage() {}
// Default values for JobConfig's optional fields, mirroring the
// [default = ...] declarations in the .proto file.
const Default_JobConfig_ScrapeTimeout string = "10s"
const Default_JobConfig_SdRefreshInterval string = "30s"
const Default_JobConfig_MetricsPath string = "/metrics.json"
@ -177,6 +153,13 @@ func (m *JobConfig) GetScrapeInterval() string {
return ""
}
// GetScrapeTimeout returns the scrape_timeout field, falling back to its
// declared default ("10s") when the receiver or the field is nil.
func (m *JobConfig) GetScrapeTimeout() string {
	if m == nil || m.ScrapeTimeout == nil {
		return Default_JobConfig_ScrapeTimeout
	}
	return *m.ScrapeTimeout
}
func (m *JobConfig) GetSdName() string {
if m != nil && m.SdName != nil {
return *m.SdName
@ -205,15 +188,10 @@ func (m *JobConfig) GetMetricsPath() string {
return Default_JobConfig_MetricsPath
}
// The top-level Prometheus configuration.
// NOTE(review): rendered diff residue — each field is declared twice (removed
// and added lines both shown), so this struct is not valid Go as printed;
// confirm against the post-commit file.
type PrometheusConfig struct {
// Global Prometheus configuration options. If omitted, an empty global
// configuration with default values (see GlobalConfig definition) will be
// created.
Global *GlobalConfig `protobuf:"bytes,1,opt,name=global" json:"global,omitempty"`
// The list of jobs to scrape.
Job []*JobConfig `protobuf:"bytes,2,rep,name=job" json:"job,omitempty"`
XXX_unrecognized []byte `json:"-"`
Global *GlobalConfig `protobuf:"bytes,1,opt,name=global" json:"global,omitempty"`
Job []*JobConfig `protobuf:"bytes,2,rep,name=job" json:"job,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *PrometheusConfig) Reset() { *m = PrometheusConfig{} }

View file

@ -86,7 +86,7 @@ func (p *sdTargetProvider) Targets() ([]Target, error) {
addr.Target = addr.Target[:len(addr.Target)-1]
}
endpoint.Host = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
t := NewTarget(endpoint.String(), time.Second*5, baseLabels)
t := NewTarget(endpoint.String(), p.job.ScrapeTimeout(), baseLabels)
targets = append(targets, t)
}

View file

@ -14,8 +14,6 @@
package retrieval
import (
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/extraction"
@ -110,7 +108,7 @@ func (m *targetManager) AddTargetsFromConfig(config config.Config) {
}
for _, endpoint := range targetGroup.Target {
target := NewTarget(endpoint, time.Second*5, baseLabels)
target := NewTarget(endpoint, job.ScrapeTimeout(), baseLabels)
m.AddTarget(job, target)
}
}

View file

@ -15,7 +15,6 @@ package api
import (
"net/http"
"time"
clientmodel "github.com/prometheus/client_golang/model"
@ -47,7 +46,7 @@ func (serv MetricsService) SetTargets(targetGroups []TargetGroup, jobName string
}
for _, endpoint := range targetGroup.Endpoints {
newTarget := retrieval.NewTarget(endpoint, time.Second*5, baseLabels)
newTarget := retrieval.NewTarget(endpoint, job.ScrapeTimeout(), baseLabels)
newTargets = append(newTargets, newTarget)
}
}