# Global default settings.
global {
  scrape_interval: "15s"      # By default, scrape targets every 15 seconds.
  evaluation_interval: "15s"  # By default, evaluate rules every 15 seconds.

  # Attach these extra labels to all timeseries collected by this
  # Prometheus instance.
  labels: {
    label: {
      name: "monitor"
      value: "codelab-monitor"
    }
  }

  # Load and evaluate rules in this file every 'evaluation_interval' seconds.
  # This field may be repeated.
  #rule_file: "prometheus.rules"
}
# A job definition containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
job: {
  # The job name is added as a label `job={job-name}` to any timeseries
  # scraped from this job.
  name: "prometheus"

  # Override the global default and scrape targets from this job every
  # 5 seconds.
  scrape_interval: "5s"

  # Let's define a group of targets to scrape for this job.
  # In this case, only one.
  target_group: {
    # These endpoints are scraped via HTTP.
    target: "http://localhost:9090/metrics"
  }
}