Update mixin dashboards and alerts for new remote write label names.

Signed-off-by: Callum Styan <callumstyan@gmail.com>

Commit: 5400e71b91
Parent: 9a21fdcd1b
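
The remote write queue metrics and alert labels referenced by this mixin no longer carry a single queue label; they expose separate remote_name and url labels instead. The changes below update the alert descriptions, the dashboard queries and legend formats, and the dashboard template variable accordingly, so a queue is now identified as remote_name:url (for example {{ $labels.remote_name }}:{{ $labels.url }} in alert annotations) rather than via $labels.queue.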
@@ -187,7 +187,7 @@
         },
         annotations: {
           summary: 'Prometheus fails to send samples to remote storage.',
-          description: 'Prometheus %(prometheusName)s failed to send {{ printf "%%.1f" $value }}%% of the samples to {{ if $labels.queue }}{{ $labels.queue }}{{ else }}{{ $labels.url }}{{ end }}.' % $._config,
+          description: 'Prometheus %(prometheusName)s failed to send {{ printf "%%.1f" $value }}%% of the samples to {{ $labels.remote_name}}:{{ $labels.url }}' % $._config,
         },
       },
       {
@@ -208,7 +208,7 @@
         },
         annotations: {
           summary: 'Prometheus remote write is behind.',
-          description: 'Prometheus %(prometheusName)s remote write is {{ printf "%%.1f" $value }}s behind for {{ if $labels.queue }}{{ $labels.queue }}{{ else }}{{ $labels.url }}{{ end }}.' % $._config,
+          description: 'Prometheus %(prometheusName)s remote write is {{ printf "%%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}.' % $._config,
         },
       },
       {
@@ -228,7 +228,7 @@
         },
         annotations: {
           summary: 'Prometheus remote write desired shards calculation wants to run more than configured max shards.',
-          description: 'Prometheus %(prometheusName)s remote write desired shards calculation wants to run {{ $value }} shards, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%%s",%(prometheusSelector)s}` $labels.instance | query | first | value }}.' % $._config,
+          description: 'Prometheus %(prometheusName)s remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%%s",%(prometheusSelector)s}` $labels.instance | query | first | value }}.' % $._config,
         },
       },
       {
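The alert description strings above mix two templating layers: jsonnet '%' formatting fills in %(prometheusName)s from $._config at mixin build time (with %% escaping a literal %), while the {{ ... }} parts are left intact for Alertmanager to render per firing alert, which is where the new remote_name and url labels are read. A minimal, self-contained sketch of that mechanism, assuming a hypothetical prometheusName value rather than the mixin's real config:

// Minimal sketch of the two templating layers; the config value here is a
// hypothetical placeholder, not the mixin's actual $._config.
local config = { prometheusName: '{{$labels.instance}}' };

{
  // Jsonnet '%' formatting resolves %(prometheusName)s and turns %% into a
  // literal %, leaving the {{ ... }} Go-template parts for Alertmanager.
  description: 'Prometheus %(prometheusName)s failed to send {{ printf "%%.1f" $value }}%% of the samples to {{ $labels.remote_name }}:{{ $labels.url }}' % config,
}

Evaluating this object with the jsonnet CLI yields a description whose only remaining placeholders are the Go-template expressions that Alertmanager resolves from the alert's labels.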
@@ -110,10 +110,10 @@ local template = grafana.template;
       (
       prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
       -
-      ignoring(queue) group_right(instance) prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}
+      ignoring(remote_name, url) group_right(instance) prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}
       )
     |||,
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}',
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
   ));

 local timestampComparisonRate =
@@ -127,10 +127,10 @@ local template = grafana.template;
       (
       rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
       -
-      ignoring (queue) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
+      ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
       )
     |||,
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}',
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
   ));

 local samplesRate =
@@ -144,11 +144,11 @@ local template = grafana.template;
       rate(
       prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m])
       -
-      ignoring(queue) group_right(instance) rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])
+      ignoring(remote_name, url) group_right(instance) rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])
       -
       rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])
     |||,
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local currentShards =
@@ -160,7 +160,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local maxShards =
@@ -171,7 +171,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local minShards =
@@ -182,7 +182,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local desiredShards =
@@ -193,7 +193,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local shardsCapacity =
@@ -204,7 +204,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

@@ -216,7 +216,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local walSegment =
@@ -240,7 +240,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{consumer}}'
   ));

 local droppedSamples =
@@ -251,7 +251,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local failedSamples =
@@ -262,7 +262,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local retriedSamples =
@@ -273,7 +273,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance"}[5m])',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 local enqueueRetries =
@@ -284,7 +284,7 @@ local template = grafana.template;
   )
   .addTarget(prometheus.target(
     'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance"}[5m])',
-    legendFormat='{{cluster}}:{{instance}}-{{queue}}'
+    legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
   ));

 dashboard.new('Prometheus Remote Write',
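The panel queries above that subtract per-queue series from a single per-instance series keep the same join shape; only the matching labels change. ignoring(remote_name, url) group_right(instance) makes the one-to-many match ignore the queue-identifying labels, with the many per-queue series on the right-hand side providing the output series. A rough sketch of one updated target written as a plain jsonnet object; the field names (expr, legendFormat) mirror the generated Grafana target JSON and are illustrative, not the grafonnet API call used in the file:

// Illustrative jsonnet object approximating one updated panel target.
{
  expr: |||
    (
      prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
    -
      # match ignoring the per-queue labels: many queue series (right) per instance series (left)
      ignoring(remote_name, url) group_right(instance)
      prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance"}
    )
  |||,
  // each series in the legend is now identified by remote_name:url instead of the old queue label
  legendFormat: '{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
}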
@@ -331,9 +331,9 @@ local template = grafana.template;
   )
   .addTemplate(
     template.new(
-      'queue',
+      'url',
       '$datasource',
-      'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, queue)' % $._config,
+      'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, url)' % $._config,
       refresh='time',
       includeAll=true,
     )