Filter out textfile metrics correctly when using collect[] filters (#763)

* remove injection hook for textfile metrics, convert them to prometheus format

* add support for summaries

* add support for histograms

* add logic for handling inconsistent labels within a metric family for counter, gauge, untyped

* change logic for parsing the metrics textfile

* fix logic to adding missing labels

* Export time and error metrics for textfiles

* Add tests for new textfile collector, fix found bugs

* refactor Update() to split into smaller functions

* remove parseTextFiles(), fix import issue

* add mtime metric directly to channel, fix handling of mtime during testing

* rename variables related to labels

* refactor: add default case, remove if guard for metrics, remove extra loop and slice

* refactor: remove extra loop iterating over metric families

* test: add test case for different metric type, fix found bug

* test: add test for metrics with inconsistent labels

* test: add test for histogram

* test: add test for histogram with extra dimension

* test: add test for summary

* test: add test for summary with extra dimension

* remove unnecessary creation of protobuf

* nit: remove extra blank line
This commit is contained in:
Shubheksha Jalan 2017-12-24 00:51:58 +05:30 committed by Julius Volz
parent cd2a17176a
commit 1f2458f42c
17 changed files with 513 additions and 175 deletions

View file

@ -0,0 +1,32 @@
# HELP event_duration_seconds_total Query timings
# TYPE event_duration_seconds_total summary
event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06
event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06
event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09
event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05
event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207
event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09
event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06
event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06
event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06
event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078
event_duration_seconds_total_count{baz="result_append"} 1.427647e+06
event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06
event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06
event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06
event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307
event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06
# HELP events_total this is a test metric
# TYPE events_total counter
events_total{foo="bar"} 10
events_total{foo="baz"} 20
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0

View file

@ -0,0 +1,28 @@
# HELP events_total this is a test metric
# TYPE events_total counter
events_total{foo="bar"} 10
events_total{foo="baz"} 20
# HELP event_duration_seconds_total Query timings
# TYPE event_duration_seconds_total summary
event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06
event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06
event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09
event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05
event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207
event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09
event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06
event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06
event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06
event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078
event_duration_seconds_total_count{baz="result_append"} 1.427647e+06
event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06
event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06
event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06
event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307
event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06

View file

@ -0,0 +1,21 @@
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range histogram
prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count 1.691853e+06

View file

@ -0,0 +1,15 @@
# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range histogram
prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count 1.691853e+06

View file

@ -0,0 +1,34 @@
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range histogram
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06

View file

@ -0,0 +1,28 @@
# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range histogram
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06
prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11
prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06

View file

@ -0,0 +1,29 @@
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines{foo=""} 20
go_goroutines{foo="bar"} 229
# HELP http_requests_total Total number of HTTP requests made.
# TYPE http_requests_total counter
http_requests_total{baz="",code="200",foo="",handler="",method="get"} 11
http_requests_total{baz="",code="200",foo="",handler="alerts",method="get"} 35
http_requests_total{baz="",code="200",foo="",handler="config",method="get"} 8
http_requests_total{baz="",code="200",foo="",handler="flags",method="get"} 18
http_requests_total{baz="",code="200",foo="",handler="graph",method="get"} 89
http_requests_total{baz="",code="200",foo="",handler="prometheus",method="get"} 17051
http_requests_total{baz="",code="200",foo="",handler="query",method="get"} 401
http_requests_total{baz="",code="200",foo="",handler="query_range",method="get"} 15663
http_requests_total{baz="",code="200",foo="",handler="rules",method="get"} 7
http_requests_total{baz="",code="200",foo="",handler="series",method="get"} 221
http_requests_total{baz="",code="200",foo="",handler="static",method="get"} 1647
http_requests_total{baz="",code="200",foo="",handler="status",method="get"} 12
http_requests_total{baz="",code="200",foo="bar",handler="",method="get"} 325
http_requests_total{baz="",code="206",foo="",handler="static",method="get"} 2
http_requests_total{baz="",code="400",foo="",handler="query_range",method="get"} 40
http_requests_total{baz="",code="503",foo="",handler="query_range",method="get"} 3
http_requests_total{baz="bar",code="200",foo="",handler="",method="get"} 93
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0

View file

@ -0,0 +1,24 @@
# HELP http_requests_total Total number of HTTP requests made.
# TYPE http_requests_total counter
http_requests_total{code="200",handler="alerts",method="get"} 35
http_requests_total{code="200",handler="config",method="get"} 8
http_requests_total{code="200",method="get", foo="bar"} 325
http_requests_total{code="200",handler="flags",method="get"} 18
http_requests_total{code="200",handler="graph",method="get"} 89
http_requests_total{code="200",method="get", baz="bar"} 93
http_requests_total{code="200",handler="prometheus",method="get"} 17051
http_requests_total{code="200",handler="query",method="get"} 401
http_requests_total{code="200",handler="query_range",method="get"} 15663
http_requests_total{code="200",handler="rules",method="get"} 7
http_requests_total{code="200",handler="series",method="get"} 221
http_requests_total{code="200",handler="static",method="get"} 1647
http_requests_total{code="200",handler="status",method="get"} 12
http_requests_total{code="200",method="get"} 11
http_requests_total{code="206",handler="static",method="get"} 2
http_requests_total{code="400",handler="query_range",method="get"} 40
http_requests_total{code="503",handler="query_range",method="get"} 3
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines{foo="bar"} 229
go_goroutines 20

View file

@ -1,8 +1,3 @@
name: "node_textfile_scrape_error" # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
help: "1 if there was an error opening or reading a file, 0 otherwise" # TYPE node_textfile_scrape_error gauge
type: GAUGE node_textfile_scrape_error 0
metric: <
gauge: <
value: 0
>
>

View file

@ -1,8 +1,3 @@
name: "node_textfile_scrape_error" # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
help: "1 if there was an error opening or reading a file, 0 otherwise" # TYPE node_textfile_scrape_error gauge
type: GAUGE node_textfile_scrape_error 1
metric: <
gauge: <
value: 1
>
>

View file

@ -0,0 +1,28 @@
# HELP event_duration_seconds_total Query timings
# TYPE event_duration_seconds_total summary
event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06
event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06
event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09
event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05
event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207
event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09
event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06
event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06
event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06
event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078
event_duration_seconds_total_count{baz="result_append"} 1.427647e+06
event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06
event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06
event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06
event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307
event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0

View file

@ -0,0 +1,22 @@
# HELP event_duration_seconds_total Query timings
# TYPE event_duration_seconds_total summary
event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06
event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06
event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06
event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09
event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06
event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05
event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207
event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09
event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06
event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06
event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06
event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078
event_duration_seconds_total_count{baz="result_append"} 1.427647e+06
event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06
event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06
event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06
event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307
event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06

View file

@ -0,0 +1,20 @@
# HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
# TYPE node_textfile_mtime gauge
node_textfile_mtime{file="metrics.prom"} 1
# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
# TYPE node_textfile_scrape_error gauge
node_textfile_scrape_error 0
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451
prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076
prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146
prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209
prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05
prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05
prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657
prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082
prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08
prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464
prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0
prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0

View file

@ -0,0 +1,12 @@
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.5", handler="foo"} 0.000571464
prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.9"} 0.001765451
prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.99"} 0.018672076
prometheus_rule_evaluation_duration_seconds_sum{rule_type="alerting"} 214.85081044700146
prometheus_rule_evaluation_duration_seconds_count{rule_type="alerting"} 185209
prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.5"} 4.3132e-05
prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.9"} 8.9295e-05
prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.99"} 0.000193657
prometheus_rule_evaluation_duration_seconds_sum{rule_type="recording"} 185091.01317759082
prometheus_rule_evaluation_duration_seconds_count{rule_type="recording"} 1.0020195e+08

View file

@ -1,79 +1,19 @@
name: "node_textfile_mtime" # HELP node_textfile_mtime Unixtime mtime of textfiles successfully read.
help: "Unixtime mtime of textfiles successfully read." # TYPE node_textfile_mtime gauge
type: GAUGE node_textfile_mtime{file="metrics1.prom"} 1
metric: < node_textfile_mtime{file="metrics2.prom"} 1
label: < # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise
name: "file" # TYPE node_textfile_scrape_error gauge
value: "metrics1.prom" node_textfile_scrape_error 0
> # HELP testmetric1_1 Metric read from fixtures/textfile/two_metric_files/metrics1.prom
gauge: < # TYPE testmetric1_1 untyped
value: 1 testmetric1_1{foo="bar"} 10
> # HELP testmetric1_2 Metric read from fixtures/textfile/two_metric_files/metrics1.prom
> # TYPE testmetric1_2 untyped
metric: < testmetric1_2{foo="baz"} 20
label: < # HELP testmetric2_1 Metric read from fixtures/textfile/two_metric_files/metrics2.prom
name: "file" # TYPE testmetric2_1 untyped
value: "metrics2.prom" testmetric2_1{foo="bar"} 30
> # HELP testmetric2_2 Metric read from fixtures/textfile/two_metric_files/metrics2.prom
gauge: < # TYPE testmetric2_2 untyped
value: 2 testmetric2_2{foo="baz"} 40
>
>
name: "node_textfile_scrape_error"
help: "1 if there was an error opening or reading a file, 0 otherwise"
type: GAUGE
metric: <
gauge: <
value: 0
>
>
name: "testmetric1_1"
help: "Metric read from fixtures/textfile/two_metric_files/metrics1.prom"
type: UNTYPED
metric: <
label: <
name: "foo"
value: "bar"
>
untyped: <
value: 10
>
>
name: "testmetric1_2"
help: "Metric read from fixtures/textfile/two_metric_files/metrics1.prom"
type: UNTYPED
metric: <
label: <
name: "foo"
value: "baz"
>
untyped: <
value: 20
>
>
name: "testmetric2_1"
help: "Metric read from fixtures/textfile/two_metric_files/metrics2.prom"
type: UNTYPED
metric: <
label: <
name: "foo"
value: "bar"
>
untyped: <
value: 30
>
timestamp_ms: 1441205977284
>
name: "testmetric2_2"
help: "Metric read from fixtures/textfile/two_metric_files/metrics2.prom"
type: UNTYPED
metric: <
label: <
name: "foo"
value: "baz"
>
untyped: <
value: 40
>
timestamp_ms: 1441205977284
>

View file

@ -25,21 +25,28 @@ import (
"sync" "sync"
"time" "time"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
"github.com/prometheus/common/log" "github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2" kingpin "gopkg.in/alecthomas/kingpin.v2"
) )
var ( var (
textFileDirectory = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from.").Default("").String() textFileDirectory = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from.").Default("").String()
textFileAddOnce sync.Once textFileAddOnce sync.Once
mtimeDesc = prometheus.NewDesc(
"node_textfile_mtime",
"Unixtime mtime of textfiles successfully read.",
[]string{"file"},
nil,
)
) )
type textFileCollector struct { type textFileCollector struct {
path string path string
// Only set for testing to get predictable output.
mtime *float64
} }
func init() { func init() {
@ -52,31 +59,129 @@ func NewTextFileCollector() (Collector, error) {
c := &textFileCollector{ c := &textFileCollector{
path: *textFileDirectory, path: *textFileDirectory,
} }
return c, nil
}
if c.path == "" { func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
// This collector is enabled by default, so do not fail if var valType prometheus.ValueType
// the flag is not passed. var val float64
log.Infof("No directory specified, see --collector.textfile.directory")
} else { allLabelNames := map[string]struct{}{}
textFileAddOnce.Do(func() { for _, metric := range metricFamily.Metric {
prometheus.DefaultGatherer = prometheus.Gatherers{ labels := metric.GetLabel()
prometheus.DefaultGatherer, for _, label := range labels {
prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return c.parseTextFiles(), nil }), if _, ok := allLabelNames[label.GetName()]; !ok {
allLabelNames[label.GetName()] = struct{}{}
} }
}) }
} }
return c, nil for _, metric := range metricFamily.Metric {
labels := metric.GetLabel()
var names []string
var values []string
for _, label := range labels {
names = append(names, label.GetName())
values = append(values, label.GetValue())
}
for k := range allLabelNames {
present := false
for _, name := range names {
if k == name {
present = true
break
}
}
if present == false {
names = append(names, k)
values = append(values, "")
}
}
metricType := metricFamily.GetType()
switch metricType {
case dto.MetricType_COUNTER:
valType = prometheus.CounterValue
val = metric.Counter.GetValue()
case dto.MetricType_GAUGE:
valType = prometheus.GaugeValue
val = metric.Gauge.GetValue()
case dto.MetricType_UNTYPED:
valType = prometheus.UntypedValue
val = metric.Untyped.GetValue()
case dto.MetricType_SUMMARY:
quantiles := map[float64]float64{}
for _, q := range metric.Summary.Quantile {
quantiles[q.GetQuantile()] = q.GetValue()
}
ch <- prometheus.MustNewConstSummary(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
metric.Summary.GetSampleCount(),
metric.Summary.GetSampleSum(),
quantiles, values...,
)
case dto.MetricType_HISTOGRAM:
buckets := map[float64]uint64{}
for _, b := range metric.Histogram.Bucket {
buckets[b.GetUpperBound()] = b.GetCumulativeCount()
}
ch <- prometheus.MustNewConstHistogram(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
metric.Histogram.GetSampleCount(),
metric.Histogram.GetSampleSum(),
buckets, values...,
)
default:
panic("unknown metric type")
}
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(
*metricFamily.Name,
metricFamily.GetHelp(),
names, nil,
),
valType, val, values...,
)
}
}
}
func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) {
// Export the mtimes of the successful files.
if len(mtimes) > 0 {
// Sorting is needed for predictable output comparison in tests.
filenames := make([]string, 0, len(mtimes))
for filename := range mtimes {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
mtime := float64(mtimes[filename].UnixNano() / 1e9)
if c.mtime != nil {
mtime = *c.mtime
}
ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, filename)
}
}
} }
// Update implements the Collector interface. // Update implements the Collector interface.
func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error {
return nil
}
func (c *textFileCollector) parseTextFiles() []*dto.MetricFamily {
error := 0.0 error := 0.0
var metricFamilies []*dto.MetricFamily
mtimes := map[string]time.Time{} mtimes := map[string]time.Time{}
// Iterate over files and accumulate their metrics. // Iterate over files and accumulate their metrics.
@ -112,52 +217,20 @@ func (c *textFileCollector) parseTextFiles() []*dto.MetricFamily {
help := fmt.Sprintf("Metric read from %s", path) help := fmt.Sprintf("Metric read from %s", path)
mf.Help = &help mf.Help = &help
} }
metricFamilies = append(metricFamilies, mf) convertMetricFamily(mf, ch)
} }
} }
// Export the mtimes of the successful files. c.exportMTimes(mtimes, ch)
if len(mtimes) > 0 {
mtimeMetricFamily := dto.MetricFamily{
Name: proto.String("node_textfile_mtime"),
Help: proto.String("Unixtime mtime of textfiles successfully read."),
Type: dto.MetricType_GAUGE.Enum(),
Metric: []*dto.Metric{},
}
// Sorting is needed for predictable output comparison in tests.
filenames := make([]string, 0, len(mtimes))
for filename := range mtimes {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
mtimeMetricFamily.Metric = append(mtimeMetricFamily.Metric,
&dto.Metric{
Label: []*dto.LabelPair{
{
Name: proto.String("file"),
Value: proto.String(filename),
},
},
Gauge: &dto.Gauge{Value: proto.Float64(float64(mtimes[filename].UnixNano()) / 1e9)},
},
)
}
metricFamilies = append(metricFamilies, &mtimeMetricFamily)
}
// Export if there were errors. // Export if there were errors.
metricFamilies = append(metricFamilies, &dto.MetricFamily{ ch <- prometheus.MustNewConstMetric(
Name: proto.String("node_textfile_scrape_error"), prometheus.NewDesc(
Help: proto.String("1 if there was an error opening or reading a file, 0 otherwise"), "node_textfile_scrape_error",
Type: dto.MetricType_GAUGE.Enum(), "1 if there was an error opening or reading a file, 0 otherwise",
Metric: []*dto.Metric{ nil, nil,
{ ),
Gauge: &dto.Gauge{Value: &error}, prometheus.GaugeValue, error,
}, )
}, return nil
})
return metricFamilies
} }

View file

@ -14,17 +14,38 @@
package collector package collector
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"sort" "net/http"
"strings" "net/http/httptest"
"testing" "testing"
"github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log" "github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2" kingpin "gopkg.in/alecthomas/kingpin.v2"
) )
func TestParseTextFiles(t *testing.T) { type collectorAdapter struct {
Collector
}
// Describe implements the prometheus.Collector interface.
func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) {
// We have to send *some* metric in Describe, but we don't know which ones
// we're going to get, so just send a dummy metric.
ch <- prometheus.NewDesc("dummy_metric", "Dummy metric.", nil, nil)
}
// Collect implements the prometheus.Collector interface.
func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) {
err := a.Update(ch)
if err != nil {
panic(fmt.Sprintf("failed to update collector: %v", err))
}
}
func TestTextfileCollector(t *testing.T) {
tests := []struct { tests := []struct {
path string path string
out string out string
@ -41,11 +62,37 @@ func TestParseTextFiles(t *testing.T) {
path: "fixtures/textfile/nonexistent_path", path: "fixtures/textfile/nonexistent_path",
out: "fixtures/textfile/nonexistent_path.out", out: "fixtures/textfile/nonexistent_path.out",
}, },
{
path: "fixtures/textfile/different_metric_types",
out: "fixtures/textfile/different_metric_types.out",
},
{
path: "fixtures/textfile/inconsistent_metrics",
out: "fixtures/textfile/inconsistent_metrics.out",
},
{
path: "fixtures/textfile/histogram",
out: "fixtures/textfile/histogram.out",
},
{
path: "fixtures/textfile/histogram_extra_dimension",
out: "fixtures/textfile/histogram_extra_dimension.out",
},
{
path: "fixtures/textfile/summary",
out: "fixtures/textfile/summary.out",
},
{
path: "fixtures/textfile/summary_extra_dimension",
out: "fixtures/textfile/summary_extra_dimension.out",
},
} }
for i, test := range tests { for i, test := range tests {
c := textFileCollector{ mtime := 1.0
path: test.path, c := &textFileCollector{
path: test.path,
mtime: &mtime,
} }
// Suppress a log message about `nonexistent_path` not existing, this is // Suppress a log message about `nonexistent_path` not existing, this is
@ -56,17 +103,12 @@ func TestParseTextFiles(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
mfs := c.parseTextFiles() registry := prometheus.NewRegistry()
textMFs := make([]string, 0, len(mfs)) registry.MustRegister(collectorAdapter{c})
for _, mf := range mfs {
if mf.GetName() == "node_textfile_mtime" { rw := httptest.NewRecorder()
mf.GetMetric()[0].GetGauge().Value = proto.Float64(1) promhttp.HandlerFor(registry, promhttp.HandlerOpts{}).ServeHTTP(rw, &http.Request{})
mf.GetMetric()[1].GetGauge().Value = proto.Float64(2) got := string(rw.Body.String())
}
textMFs = append(textMFs, proto.MarshalTextString(mf))
}
sort.Strings(textMFs)
got := strings.Join(textMFs, "")
want, err := ioutil.ReadFile(test.out) want, err := ioutil.ReadFile(test.out)
if err != nil { if err != nil {
@ -74,7 +116,7 @@ func TestParseTextFiles(t *testing.T) {
} }
if string(want) != got { if string(want) != got {
t.Fatalf("%d. want:\n\n%s\n\ngot:\n\n%s", i, string(want), got) t.Fatalf("%d.%q want:\n\n%s\n\ngot:\n\n%s", i, test.path, string(want), got)
} }
} }
} }