mirror of https://github.com/prometheus/prometheus.git (synced 2025-02-21 03:16:00 -08:00)

remote write 2.0: sync with main branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation. 'iowait' and 'steal' indicate specific idle/wait states, which shouldn't be counted into CPU Utilisation. Also see https://github.com/prometheus-operator/kube-prometheus/pull/796 and https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667. Per the iostat man page:
  * %idle: Show the percentage of time that the CPU or CPUs were idle and the system did not have an outstanding disk I/O request.
  * %iowait: Show the percentage of time that the CPU or CPUs were idle during which the system had an outstanding disk I/O request.
  * %steal: Show the percentage of time spent in involuntary wait by the virtual CPU or CPUs while the hypervisor was servicing another virtual processor.
  Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers. 4 billion active transactions ought to be enough for anyone. Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand. When Prometheus restarts it creates every series read in from the WAL, but many of those series will be finished, and never receive any more samples. By deferring allocation of the txRing slice to when it is first needed, we save 32 bytes per stale series. (A lazy-allocation sketch follows the commit list.) Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard. Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetic. Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
  * Cut 2.49.0-rc.0. Signed-off-by: bwplotka <bwplotka@gmail.com>
  * Removed the duplicate. Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292. Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283). Calling `*armnetwork.InterfacesClient.Get()` doesn't work for a Scale Set VM NIC, because these use a different Resource ID format. Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()` instead. This needs both the scale set name and the instance ID, so add an `InstanceID` field to the `virtualMachine` struct. `InstanceID` is empty for a VM that isn't a ScaleSetVM. Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1. Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable. Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness. Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix. Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0.
  [Release notes](https://github.com/prometheus/client_golang/releases) | [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) | [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0). updated-dependencies: github.com/prometheus/client_golang (direct:production, version-update:semver-minor). Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1. [Release notes](https://github.com/actions/setup-node/releases) | [Commits](5e21ff4d9b...b39b52d121). updated-dependencies: actions/setup-node (direct:production, version-update:semver-major). Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive. Otherwise the resulting string depends on find, which afaict depends on the underlying filesystem. A stable file list makes it easier to detect UI changes in downstreams that need to track UI assets. Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string. Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData. Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb from 1.11.2 to 1.11.4. [Release notes](https://github.com/influxdata/influxdb/releases) | [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4). updated-dependencies: github.com/influxdata/influxdb (direct:production, version-update:semver-patch). Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus from 0.48.0 to 0.48.1. [Release notes](https://github.com/prometheus/prometheus/releases) | [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) | [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1). updated-dependencies: github.com/prometheus/prometheus (direct:production, version-update:semver-patch). Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373). Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
  * Drop old inmemory samples. Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
  * Avoid copying timeseries when the feature is disabled
  * Run gofmt
  * Clarify docs
  * Add more logging info
  * Remove loggers
  * optimize function and add tests
  * Simplify filter
  * rename var
  * Update help info from metrics
  * use metrics to keep track of drop elements during buildWriteRequest
  * rename var in tests
  * pass time.Now as parameter
  * Change buildwriterequest during retries
  * Revert "Remove loggers" (reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab)
  * use log level debug for loggers
  * Fix linter
  * Remove noisy debug-level logs; add 'reason' label to drop metrics
  * Remove accidentally committed files
  * Propagate logger to buildWriteRequest to log dropped data
  * Fix docs comment
  * Make drop reason more specific
  * Remove unnecessary pass of logger
  * Use snake_case for reason label
  * Fix dropped samples metric
  Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>, Marc Tuduri <marctc@protonmail.com>, Paschalis Tsilias <tpaschalis@users.noreply.github.com>. Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>, Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go. The loop ran indefinitely if the condition isn't met. Before, each iteration created a new timer channel which was always outpaced by the other timer channel with smaller duration. Minor detail: there was a memory leak, as the resources of the ~10 previous timers were constantly kept. With the fix, we may keep the resources of one timer around for defaultWait, but this isn't worth the changes to make it right. (A sketch of the single-deadline pattern follows the commit list.) Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData (ui: fix handling of scalar and string in isHeatmapData)
* tsdb/{index,compact}: allow using custom postings encoding format (#13242). Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
  * tsdb/{index,compact}: allow using custom postings encoding format. We would like to experiment with a different postings encoding format in Thanos, so in this change I am proposing adding another argument to `NewWriter` which would allow users to change the format if needed. Also, wire the leveled compactor so that it would be possible to change the format there too.
  * tsdb/compact: use a struct for leveled compactor options. As discussed on Slack, let's use a struct for the options in leveled compactor.
  * tsdb: make changes after Bryan's review: make changes less intrusive, turn the postings encoder type into a function, and add NewWriterWithEncoder().
* Cut 2.49.0-rc.2. Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362). [Release notes](https://github.com/actions/setup-go/releases) | [Commits](6edd4406fa...0c52d547c9). updated-dependencies: actions/setup-go (direct:production, version-update:semver-major). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358). [Release notes](https://github.com/github/codeql-action/releases) | [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) | [Commits](407ffafae6...012739e508). updated-dependencies: github/codeql-action (direct:production, version-update:semver-major). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis as a release shepherd (#13383). Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331). Add `query analyze` command to promtool. This command analyzes the buckets of classic and native histograms, based on data queried from the Prometheus query API, i.e. it doesn't require direct access to the TSDB files. Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions. Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func. Fixing a very small logical problem I've introduced :(. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction. This functionality is needed in downstream projects because they have a separate component that does compaction. Upstreaming 7c8e9a2a76/tsdb/compact.go (L323-L325). Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0. Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command (#13296). Conditions are ANDed inside the same matcher, but matchers are ORed. Including unit tests for "promtool tsdb dump". Refactor some matchers scraping utils. Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog. Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398). This added the https://github.com/prometheus/prometheus/pull/13393 "EnableOverlappingCompaction" parameter to the compactor code but not to the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and set it to `true` in Prometheus. Copy/paste the description from https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header. Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix. Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package. Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022). Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389). The slices package is added to the standard library in Go 1.21; we need to import from the exp area to maintain compatibility with Go 1.20. Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538). Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations. Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535). Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>. Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411). These functions are intended for humans, as robots can already sort the results however they please.
  Humans like things sorted "naturally" (https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/). A similar thing has been done to Grafana, which is also used by humans: https://github.com/grafana/grafana/pull/78024 and https://github.com/grafana/grafana/pull/78494. (A natural-order comparator sketch follows the commit list.) Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case. Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag. Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11. Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222). Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>, Björn Rabenstein <github@rabenste.in>. Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>, Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392). Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script. Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2 from 2.4.0 to 2.6.0. [Release notes](https://github.com/hetznercloud/hcloud-go/releases) | [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) | [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0). updated-dependencies: github.com/hetznercloud/hcloud-go/v2 (direct:production, version-update:semver-minor). Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342). Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
  * Added diff flag for unit test to improve readability & debugging
  * Removed blank spaces
  * Fixed linting error
  * Added cli flags to documentation
  * Revert unrelated linting fixes
  * Fixed review suggestions
  * Cleanup
  * Updated flag description
  * Updated flag description
* storage: skip merging when no remote storage configured. Prometheus is hard-coded to use a fanout storage between TSDB and a remote storage which by default is empty. This change detects the empty storage and skips merging between result sets, which would make `Select()` sort results. Bottom line: we skip a sort unless there really is some remote storage configured. (A sketch of this shortcut follows the commit list.) Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432). I have not had the time to keep up with remote write and have no plans to work on it in the near future, so I am withdrawing my maintainership of that part of the codebase. I continue to focus on client_python. Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time. Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426). Don't calculate postings beforehand: we may not need them. If all matchers are for the requested label, we can just filter its values. Also, if there are no values at all, no need to run any kind of logic. Also add more labelValuesWithMatchers benchmarks. (A sketch follows the commit list.) Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Add automatic memory limit handling. Enable automatic detection of memory limits and configure GOMEMLIMIT to match. Also includes a flag to allow controlling the reserved ratio. (A usage sketch follows the commit list.) Signed-off-by: SuperQ <superq@gmail.com>
* Update OSSF badge link (#13433). Provide a more user-friendly interface. Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375). SD Managers take over responsibility for SD metrics registration. Signed-off-by: Paulin Todev <paulin.todev@gmail.com>, Björn Rabenstein <github@rabenste.in>. Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340). Histogram iterators allocate new objects in the AtHistogram and AtFloatHistogram methods, which makes calculating rates over long ranges expensive. In #13215 we allowed an existing object to be reused when converting an integer histogram to a float histogram. This commit follows the same idea and allows injecting an existing object in the AtHistogram and AtFloatHistogram methods. When the injected value is nil, iterators allocate new histograms; otherwise they populate and return the injected object. The commit also adds a CopyTo method to Histogram and FloatHistogram which is used in the BufferedIterator to overwrite items in the ring instead of making new copies. Note that a specialized HPoint pool is needed for all of this to work (`matrixSelectorHPool`). Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>. Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* doc: Mark `mad_over_time` as experimental (#13440). We forgot to do that in https://github.com/prometheus/prometheus/pull/13059. Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'. Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code. Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414). Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple of typos in other test cases. Also enclosing some implicit sub-tests in a `t.Run` call to make them explicitly sub-tests. Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438). Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454). Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
  Updates `k8s.io/api` from 0.28.4 to 0.29.1 ([Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)) and `k8s.io/client-go` from 0.28.4 to 0.29.1 ([Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) | [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)). updated-dependencies: k8s.io/api and k8s.io/client-go (direct:production, version-update:semver-minor, dependency-group: k8s-io). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453). Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0. [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) | [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) | [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0). updated-dependencies: go.opentelemetry.io/collector/semconv (direct:production, version-update:semver-minor, dependency-group: go-opentelemetry-io). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355). [Release notes](https://github.com/actions/upload-artifact/releases) | [Commits](a8a3f3ad30...c7d193f32e). updated-dependencies: actions/upload-artifact (direct:production, version-update:semver-major). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357). Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459. [Release notes](https://github.com/bufbuild/buf-push-action/releases) | [Commits](342fc4cdcf...a654ff18ef). updated-dependencies: bufbuild/buf-push-action (direct:production). Signed-off-by: dependabot[bot] <support@github.com>. Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446). This function is called very frequently when executing PromQL functions, and we can do it much more efficiently inside Labels. In the common case that `__name__` comes first in the labels, we simply re-point to start at the next label, which is nearly free. `DropMetricName` is now so cheap I removed the cache; benchmarks show everything still goes faster. (A sketch follows the commit list.) Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: simplify internal series delete function (#13261). Lifting an optimisation from Agent code, `seriesHashmap.del` can use the unique series reference and doesn't need to check Labels. Also streamline the logic for deleting from `unique` and `conflicts` maps, and add some comments to help the next person. Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines. Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462). Rollback k8s.io Go modules to v0.28.6 to avoid forcing an upgrade of Go to 1.21. This allows us to keep compatibility with the currently supported upstream Go releases. Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed. Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms. I have been the de-facto maintainer for native histograms from the beginning, so let's put this into MAINTAINERS.md. In addition, I hereby propose George Krajcsovits AKA Krajo as a co-maintainer. He has contributed a lot of native histogram code, but more importantly, he has contributed substantially to reviewing other contributors' native histogram code, up to a point where I was merely rubberstamping the PRs he had already reviewed. I'm confident that he is ready to be granted commit rights as outlined in the "Maintainers" section of the governance (https://prometheus.io/governance/#maintainers). According to the same section of the governance, I will announce the proposed change on the developers mailing list and will give some time for lazy consensus before merging this PR. Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460). Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series. Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since `RLock` is only used in two places, `UpdateMetadata` and `Delete`, neither of which are hotspots, we should use the cheaper one. Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Fix last_over_time for native histograms. The last_over_time function retains a histogram sample without making a copy. This sample is now coming from the buffered iterator used for windowing functions, and can be reused for reading subsequent samples as the iterator progresses. I would propose copying the sample in the last_over_time function, similar to how it is done for rate, sum_over_time and others.
  Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
* Implementation. NOTE: Rebased from main after refactor in #13014. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden. Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating. Updated & added tests. Review feedback nits. Return empty map if not indeterminate. Use highWatermark to track inflight requests counter. Appease the linter. Clarify feature flag. (A concurrency-controller sketch follows the commit list.) Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update concurrency controller once the rule evaluation has done. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469). Converted maps into labels.Labels to avoid a lot of copying of data, which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI. Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484). Converted maps into labels.Labels to avoid a lot of copying of data, which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI. Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421). This PR is a reference implementation of the proposal described in #10420. In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because it was built before stringlabels was a thing). Follow-up work: as mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate the query execution: an optional, configurable and fast in-memory cache for the series hashes. Signed-off-by: Marco Pracucci <marco@pracucci.com>
* storage/remote: document why two benchmarks are skipped. One was silently doing nothing; one was doing something, but the work didn't go up linearly with iteration count. Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337). fix(discovery/kubernetes/endpoints): react to changes on Pods, because some modifications can occur on them without triggering an update on the related Endpoints (the Pod phase changing from Pending to Running, e.g.). Signed-off-by: machine424 <ayoubmrini424@gmail.com>. Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106). Signed-off-by: Mikhail Fesenko <proggga@gmail.com>. Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation (#13448). Reusing points slice from previous series when the slice is under-utilized. Adding comments on the bench test. Signed-off-by: Alan Protasio <alanprot@gmail.com>
* lint. Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy. Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>

Combined trailers for the merge commit:

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>, Bryan Boreham <bjboreham@gmail.com>, Erik Sommer <ersotech@posteo.de>, Linas Medziunas <linas.medziunas@gmail.com>, bwplotka <bwplotka@gmail.com>, Arianna Vespri <arianna.vespri@yahoo.it>, machine424 <ayoubmrini424@gmail.com>, Daniel Nicholls <daniel.nicholls@resdiary.com>, Daniel Kerbel <nmdanny@gmail.com>, dependabot[bot] <support@github.com>, Jan Fajerski <jfajersk@redhat.com>, Kevin Mingtarja <kevin.mingtarja@gmail.com>, Paschalis Tsilias <paschalis.tsilias@grafana.com>, Marc Tuduri <marctc@protonmail.com>, Paschalis Tsilias <tpaschalis@users.noreply.github.com>, Giedrius Statkevičius <giedrius.statkevicius@vinted.com>, Augustin Husson <augustin.husson@amadeus.com>, Jeanette Tan <jeanette.tan@grafana.com>, Bartlomiej Plotka <bwplotka@gmail.com>, Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>, Marco Pracucci <marco@pracucci.com>, tyltr <tylitianrui@126.com>, Ted Robertson <10043369+tredondo@users.noreply.github.com>, Ivan Babrou <github@ivan.computer>, Arve Knudsen <arve.knudsen@gmail.com>, Israel Blancas <iblancasa@gmail.com>, Ziqi Zhao <zhaoziqi9146@gmail.com>, Björn Rabenstein <github@rabenste.in>, Goutham <gouthamve@gmail.com>, Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>, Chris Marchbanks <csmarchbanks@gmail.com>, Ben Ye <benye@amazon.com>, Oleg Zaytsev <mail@olegzaytsev.com>, SuperQ <superq@gmail.com>, Ben Kochie <superq@gmail.com>, Matthieu MOREL <matthieu.morel35@gmail.com>, Paulin Todev <paulin.todev@gmail.com>, Filip Petkovski <filip.petkovsky@gmail.com>, beorn7 <beorn@grafana.com>, Augustin Husson <husson.augustin@gmail.com>, Yury Moladau <yurymolodov@gmail.com>, Danny Kopping <danny.kopping@grafana.com>, Leegin <114397475+Leegin-darknight@users.noreply.github.com>, Mikhail Fesenko <proggga@gmail.com>, Jesus Vazquez <jesusvzpg@gmail.com>, Alan Protasio <alanprot@gmail.com>, Nicolás Pazos <npazosmendez@gmail.com>

Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>, Bryan Boreham <bjboreham@gmail.com>, Erik Sommer <ersotech@posteo.de>, Linas Medziunas <linas.medziunas@gmail.com>, Bartlomiej Plotka <bwplotka@gmail.com>, Arianna Vespri <arianna.vespri@yahoo.it>, machine424 <ayoubmrini424@gmail.com>, daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>, Daniel Kerbel <nmdanny@gmail.com>, dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>, Jan Fajerski <jfajersk@redhat.com>, Kevin Mingtarja <kevin.mingtarja@gmail.com>, Paschalis Tsilias <tpaschalis@users.noreply.github.com>, Marc Tudurí <marctc@protonmail.com>, Paschalis Tsilias <paschalis.tsilias@grafana.com>, Giedrius Statkevičius <giedrius.statkevicius@vinted.com>, Augustin Husson <husson.augustin@gmail.com>, Björn Rabenstein <beorn@grafana.com>, zenador <zenador@users.noreply.github.com>, gotjosh <josue.abreu@gmail.com>, Ben Kochie <superq@gmail.com>, Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>, Marco Pracucci <marco@pracucci.com>, tyltr <tylitianrui@126.com>, Ted Robertson <10043369+tredondo@users.noreply.github.com>, Julien Pivotto <roidelapluie@o11y.eu>, Matthias Loibl <mail@matthiasloibl.com>, Ivan Babrou <github@ivan.computer>, Arve Knudsen <arve.knudsen@gmail.com>, Israel Blancas <iblancasa@gmail.com>, Ziqi Zhao <zhaoziqi9146@gmail.com>, George Krajcsovits <krajorama@users.noreply.github.com>, Björn Rabenstein <github@rabenste.in>, Goutham <gouthamve@gmail.com>, Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>, Chris Marchbanks <csmarchbanks@gmail.com>, Ben Ye <benye@amazon.com>, Oleg Zaytsev <mail@olegzaytsev.com>, Matthieu MOREL <matthieu.morel35@gmail.com>, Paulin Todev <paulin.todev@gmail.com>, Filip Petkovski <filip.petkovsky@gmail.com>, Yury Molodov <yurymolodov@gmail.com>, Danny Kopping <danny.kopping@grafana.com>, Leegin <114397475+Leegin-darknight@users.noreply.github.com>, Guillermo Sanchez Gavier <gsanchez@newrelic.com>, Mikhail Fesenko <proggga@gmail.com>, Alan Protasio <alanprot@gmail.com>
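The "tsdb: create isolation transaction slice on demand" entry above is easiest to see as code. A minimal sketch of the deferred-allocation pattern, assuming illustrative names (memSeries, txs); the real types live in Prometheus's tsdb isolation code:

package main

import "fmt"

// memSeries is an illustrative stand-in for a TSDB in-memory series.
// The transaction ring starts as a nil slice: series replayed from the
// WAL that never receive another sample skip the allocation entirely,
// saving the per-series cost the commit message cites (32 bytes each).
type memSeries struct {
	txs []uint32 // nil until the series receives its first append
}

func (s *memSeries) trackTxn(txID uint32) {
	if s.txs == nil {
		s.txs = make([]uint32, 0, 4) // allocate only on first use
	}
	s.txs = append(s.txs, txID)
}

func main() {
	s := &memSeries{}
	fmt.Println(s.txs == nil) // true: stale series stay cheap
	s.trackTxn(1)
	fmt.Println(len(s.txs)) // 1
}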
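The requireUpdate fix above describes a common Go testing pitfall. A hedged sketch, not the actual discovery/file test code: create the overall deadline once, outside the loop, so it can actually fire and no per-iteration timers leak:

package main

import (
	"errors"
	"time"
)

const defaultWait = 5 * time.Second

// waitForUpdate waits for a satisfying value on ch until one overall
// deadline expires. Creating the timeout channel once, outside the loop,
// is the fix: a fresh per-iteration time.After would be recreated on
// every unsatisfying update (so it could never win the select) and each
// abandoned timer's resources would linger until it fired.
func waitForUpdate(ch <-chan string, ok func(string) bool) error {
	timeout := time.After(defaultWait) // one deadline for the whole wait
	for {
		select {
		case v := <-ch:
			if ok(v) {
				return nil
			}
		case <-timeout:
			return errors.New("timed out waiting for update")
		}
	}
}

func main() {
	ch := make(chan string, 1)
	ch <- "ready"
	_ = waitForUpdate(ch, func(s string) bool { return s == "ready" })
}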
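For the sort_by_label entry above, a sketch of what "natural" ordering means in code; this is illustrative, not the exact comparator PromQL uses:

package main

import (
	"fmt"
	"sort"
)

// naturalLess compares strings so that digit runs order numerically:
// "node2" < "node10", which a plain byte-wise comparison gets wrong.
func naturalLess(a, b string) bool {
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		if isDigit(a[i]) && isDigit(b[j]) {
			na, ni := readNum(a, i)
			nb, nj := readNum(b, j)
			if na != nb {
				return na < nb
			}
			i, j = ni, nj
			continue
		}
		if a[i] != b[j] {
			return a[i] < b[j]
		}
		i++
		j++
	}
	return len(a)-i < len(b)-j // equal prefix: shorter string first
}

func isDigit(c byte) bool { return c >= '0' && c <= '9' }

// readNum parses the digit run starting at i, returning its value and the
// index just past it. (A production comparator must also handle overflow
// and leading zeros; this sketch ignores both.)
func readNum(s string, i int) (int, int) {
	n := 0
	for i < len(s) && isDigit(s[i]) {
		n = n*10 + int(s[i]-'0')
		i++
	}
	return n, i
}

func main() {
	names := []string{"node10", "node2", "node1"}
	sort.Slice(names, func(x, y int) bool { return naturalLess(names[x], names[y]) })
	fmt.Println(names) // [node1 node2 node10]
}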
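The "skip merging when no remote storage configured" entry above, sketched with simplified local types; the real code lives in Prometheus's storage fanout and works with storage.Querier:

package main

import "fmt"

// Querier and Storage are deliberately simplified stand-ins for the
// interfaces in Prometheus's storage package.
type Querier interface {
	Select() []string
}

type Storage interface {
	Querier(mint, maxt int64) Querier
}

type sliceQuerier struct{ series []string }

func (q sliceQuerier) Select() []string { return q.series }

type sliceStorage struct{ series []string }

func (s sliceStorage) Querier(_, _ int64) Querier { return sliceQuerier{s.series} }

type fanout struct {
	primary     Storage
	secondaries []Storage
}

// Querier returns the primary querier unwrapped when no secondary (remote)
// storage is configured, skipping the merge layer and the sort it implies.
func (f fanout) Querier(mint, maxt int64) Querier {
	primary := f.primary.Querier(mint, maxt)
	if len(f.secondaries) == 0 {
		return primary // fast path: nothing to merge, nothing to sort
	}
	qs := []Querier{primary}
	for _, s := range f.secondaries {
		qs = append(qs, s.Querier(mint, maxt))
	}
	return mergeQuerier{qs}
}

type mergeQuerier struct{ qs []Querier }

// Select concatenates results; a real merge querier also sorts and
// deduplicates here, which is exactly the cost the fast path avoids.
func (m mergeQuerier) Select() []string {
	var out []string
	for _, q := range m.qs {
		out = append(out, q.Select()...)
	}
	return out
}

func main() {
	local := sliceStorage{series: []string{"up", "go_goroutines"}}
	f := fanout{primary: local} // no remote storage configured
	fmt.Println(f.Querier(0, 0).Select())
}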
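The label-values shortcut (#13426) above, as a sketch; matcher here is a simplified stand-in for labels.Matcher, and where this sketch returns nil the real code falls back to its postings-based path:

package main

import (
	"fmt"
	"strings"
)

type matcher struct {
	name  string
	match func(v string) bool
}

// labelValuesWithMatchers sketches the shortcut: when every matcher is on
// the requested label, filter that label's own values directly instead of
// resolving postings first.
func labelValuesWithMatchers(name string, values []string, ms []matcher) []string {
	if len(values) == 0 {
		return nil // no values at all: skip every other step
	}
	for _, m := range ms {
		if m.name != name {
			return nil // some matcher targets another label: fall back
		}
	}
	var out []string
	for _, v := range values {
		keep := true
		for _, m := range ms {
			if !m.match(v) {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	vals := []string{"api-1", "api-2", "web-1"}
	ms := []matcher{{name: "instance", match: func(v string) bool { return strings.HasPrefix(v, "api") }}}
	fmt.Println(labelValuesWithMatchers("instance", vals, ms)) // [api-1 api-2]
}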
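The DropMetricName entry above relies on labels being stored sorted by name, with `__name__` usually first. A sketch under that assumption; the real function operates on the labels.Labels representation:

package main

import "fmt"

// Label mirrors the shape of a single Prometheus label.
type Label struct{ Name, Value string }

// dropMetricName sketches the fast path: in the common case that __name__
// is the first label, dropping it is a re-slice, i.e. nearly free.
func dropMetricName(ls []Label) []Label {
	if len(ls) > 0 && ls[0].Name == "__name__" {
		return ls[1:] // re-point past the first label, no copying
	}
	out := make([]Label, 0, len(ls)) // rare fallback: filter
	for _, l := range ls {
		if l.Name != "__name__" {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	ls := []Label{{"__name__", "http_requests_total"}, {"job", "api"}}
	fmt.Println(dropMetricName(ls)) // [{job api}]
}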
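The automatic GOMEMLIMIT entry above maps directly onto the cmd/prometheus/main.go hunks in the diff below; a standalone sketch using the same github.com/KimMachineGun/automemlimit/memlimit calls the diff introduces:

package main

import (
	"log"

	"github.com/KimMachineGun/automemlimit/memlimit"
)

func main() {
	// Mirrors the wiring in the main.go hunk below: derive GOMEMLIMIT from
	// the cgroup memory limit, falling back to total system memory, and
	// reserve headroom via the ratio (0.9 is the new flag's default).
	if _, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9),
		memlimit.WithProvider(
			memlimit.ApplyFallback(
				memlimit.FromCgroup,
				memlimit.FromSystem,
			),
		),
	); err != nil {
		log.Printf("failed to set GOMEMLIMIT automatically: %v", err)
	}
}

In Prometheus itself this is gated behind `--enable-feature=auto-gomemlimit` and tuned with `--auto-gomemlimit.ratio`, per the flag hunks in the diff below.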
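The concurrent-rule-eval commits above describe capping how many independent rules evaluate at once. A speculative sketch of a semaphore-style controller; the real interface (RuleConcurrencyController in the rules package) and its wiring differ:

package main

import "fmt"

// concurrencyController caps concurrent evaluations of independent rules,
// in the spirit of the rules.max-concurrent-evals flag added in this
// commit. Rules with dependencies simply never ask and run sequentially.
type concurrencyController struct {
	sem chan struct{}
}

func newConcurrencyController(max int64) *concurrencyController {
	return &concurrencyController{sem: make(chan struct{}, max)}
}

// Allow reports whether the rule may run concurrently right now; callers
// that get false fall back to the sequential path.
func (c *concurrencyController) Allow() bool {
	select {
	case c.sem <- struct{}{}:
		return true
	default:
		return false
	}
}

// Done releases a slot taken by Allow.
func (c *concurrencyController) Done() { <-c.sem }

func main() {
	cc := newConcurrencyController(2)
	fmt.Println(cc.Allow(), cc.Allow(), cc.Allow()) // true true false
	cc.Done()
	fmt.Println(cc.Allow()) // true
}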
This commit is contained in:
parent a0f08a8365
commit aa3513fc89
.github/CODEOWNERS (vendored): 2 changes

@@ -1,6 +1,6 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
-/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
+/storage/remote @cstyan @bwplotka @tomwilkie
 /storage/remote/otlptranslator @gouthamve @jesusvazquez
 /discovery/kubernetes @brancz
 /tsdb @jesusvazquez
.github/workflows/buf.yml (vendored): 2 changes

@@ -23,7 +23,7 @@ jobs:
         with:
           input: 'prompb'
           against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
-      - uses: bufbuild/buf-push-action@342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 # v1.1.1
+      - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
         with:
           input: 'prompb'
           buf_token: ${{ secrets.BUF_TOKEN }}
.github/workflows/ci.yml (vendored): 2 changes

@@ -197,7 +197,7 @@ jobs:
         uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - name: Install nodejs
-        uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
+        uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
.github/workflows/codeql-analysis.yml (vendored): 6 changes

@@ -30,12 +30,12 @@ jobs:
           go-version: 1.21.x

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
.github/workflows/fuzzing.yml (vendored): 2 changes

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
.github/workflows/scorecards.yml (vendored): 4 changes

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # tag=v3.1.3
+        uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # tag=v4.0.0
         with:
           name: SARIF file
           path: results.sarif
@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # tag=v2.22.8
+        uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
         with:
           sarif_file: results.sarif
CHANGELOG.md: 44 changes

@@ -1,9 +1,47 @@
 # Changelog

 ## unreleased

+## 2.49.1 / 2024-01-15
+
+* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
+* [BUGFIX] Agent: Participate in notify calls. #13223
+* [BUGFIX] TSDB: Fixed a wrong `q=` value in scrape accept header #13313
+
+## 2.49.0 / 2024-01-15
+
+* [FEATURE] Promtool: Add `--run` flag promtool test rules command. #12206
+* [FEATURE] SD: Add support for `NS` records to DNS SD. #13219
+* [FEATURE] UI: Add heatmap visualization setting in the Graph tab, useful histograms. #13096 #13371
+* [FEATURE] Scraping: Add `scrape_config.enable_compression` (default true) to disable gzip compression when scraping the target. #13166
+* [FEATURE] PromQL: Add a `promql-experimental-functions` feature flag containing some new experimental PromQL functions. #13103 NOTE: More experimental functions might be added behind the same feature flag in the future. Added functions:
+  * Experimental `mad_over_time` (median absolute deviation around the median) function. #13059
+  * Experimental `sort_by_label` and `sort_by_label_desc` functions allowing sorting returned series by labels. #11299
+* [FEATURE] SD: Add `__meta_linode_gpus` label to Linode SD. #13097
+* [FEATURE] API: Add `exclude_alerts` query parameter to `/api/v1/rules` to only return recording rules. #12999
+* [FEATURE] TSDB: --storage.tsdb.retention.time flag value is now exposed as a `prometheus_tsdb_retention_limit_seconds` metric. #12986
+* [FEATURE] Scraping: Add ability to specify priority of scrape protocols to accept during scrape (e.g. to scrape Prometheus proto format for certain jobs). This can be changed by setting `global.scrape_protocols` and `scrape_config.scrape_protocols`. #12738
+* [ENHANCEMENT] Scraping: Automated handling of scraping histograms that violate `scrape_config.native_histogram_bucket_limit` setting. #13129
+* [ENHANCEMENT] Scraping: Optimized memory allocations when scraping. #12992
+* [ENHANCEMENT] SD: Added cache for Azure SD to avoid rate-limits. #12622
+* [ENHANCEMENT] TSDB: Various improvements to OOO exemplar scraping. E.g. allowing ingestion of exemplars with the same timestamp, but with different labels. #13021
+* [ENHANCEMENT] API: Optimize `/api/v1/labels` and `/api/v1/label/<label_name>/values` when 1 set of matchers are used. #12888
+* [ENHANCEMENT] TSDB: Various optimizations for TSDB block index, head mmap chunks and WAL, reducing latency and memory allocations (improving API calls, compaction queries etc). #12997 #13058 #13056 #13040
+* [ENHANCEMENT] PromQL: Optimize memory allocations and latency when querying float histograms. #12954
+* [ENHANCEMENT] Rules: Instrument TraceID in log lines for rule evaluations. #13034
+* [ENHANCEMENT] PromQL: Optimize memory allocations in query_range calls. #13043
+* [ENHANCEMENT] Promtool: unittest interval now defaults to evaluation_intervals when not set. #12729
+* [BUGFIX] SD: Fixed Azure SD public IP reporting #13241
+* [BUGFIX] API: Fix inaccuracies in posting cardinality statistics. #12653
+* [BUGFIX] PromQL: Fix inaccuracies of `histogram_quantile` with classic histograms. #13153
+* [BUGFIX] TSDB: Fix rare fails or inaccurate queries with OOO samples. #13115
+* [BUGFIX] TSDB: Fix rare panics on append commit when exemplars are used. #13092
+* [BUGFIX] TSDB: Fix exemplar WAL storage, so remote write can send/receive samples before exemplars. #13113
+* [BUGFIX] Mixins: Fix `url` filter on remote write dashboards. #10721
+* [BUGFIX] PromQL/TSDB: Various fixes to float histogram operations. #12891 #12977 #12609 #13190 #13189 #13191 #13201 #13212 #13208
+* [BUGFIX] Promtool: Fix int32 overflow issues for 32-bit architectures. #12978
+* [BUGFIX] SD: Fix Azure VM Scale Set NIC issue. #13283
+
 ## 2.48.1 / 2023-12-07

 * [BUGFIX] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
 * [BUGFIX] Agent: Participate in notify calls (fixes slow down in remote write handling introduced in 2.45). #13223

 ## 2.48.0 / 2023-11-16
MAINTAINERS.md:

@@ -8,8 +8,10 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
 * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
+* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
+  George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
@@ -17,6 +19,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
   * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
 * `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)

 For the sake of brevity, not all subtrees are explicitly listed. Due to the
 size of this repository, the natural changes in focus of maintainers over time,
 and nuances of where particular features live, this list will always be
README.md:

@@ -14,7 +14,7 @@ examples and guides.</p>
 [](https://bestpractices.coreinfrastructure.org/projects/486)
 [](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)
+[](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)

 </div>
RELEASE.md:

@@ -54,7 +54,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
 | v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
 | v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) |
-| v2.50 | 2024-01-16 | **searching for volunteer** |
+| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
+| v2.51 | 2024-02-13 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@ -33,6 +33,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/KimMachineGun/automemlimit/memlimit"
|
||||
"github.com/alecthomas/kingpin/v2"
|
||||
"github.com/alecthomas/units"
|
||||
"github.com/go-kit/log"
|
||||
|
@ -136,6 +137,7 @@ type flagConfig struct {
|
|||
forGracePeriod model.Duration
|
||||
outageTolerance model.Duration
|
||||
resendDelay model.Duration
|
||||
maxConcurrentEvals int64
|
||||
web web.Options
|
||||
scrape scrape.Options
|
||||
tsdb tsdbOptions
|
||||
|
@ -147,7 +149,8 @@ type flagConfig struct {
|
|||
queryMaxSamples int
|
||||
RemoteFlushDeadline model.Duration
|
||||
|
||||
featureList []string
|
||||
featureList []string
|
||||
memlimitRatio float64
|
||||
// These options are extracted from featureList
|
||||
// for ease of use.
|
||||
enableExpandExternalLabels bool
|
||||
|
@ -155,7 +158,9 @@ type flagConfig struct {
|
|||
enablePerStepStats bool
|
||||
enableAutoGOMAXPROCS bool
|
||||
// todo: how to use the enable feature flag properly + use the remote format enum type
|
||||
rwFormat int
|
||||
rwFormat int
|
||||
enableAutoGOMEMLIMIT bool
|
||||
enableConcurrentRuleEval bool
|
||||
|
||||
prometheusURL string
|
||||
corsRegexString string
|
||||
|
@ -202,6 +207,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
|
|||
case "auto-gomaxprocs":
|
||||
c.enableAutoGOMAXPROCS = true
|
||||
level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
|
||||
case "auto-gomemlimit":
|
||||
c.enableAutoGOMEMLIMIT = true
|
||||
level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
|
||||
case "concurrent-rule-eval":
|
||||
c.enableConcurrentRuleEval = true
|
||||
level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
|
||||
case "no-default-scrape-port":
|
||||
c.scrape.NoDefaultPort = true
|
||||
level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
|
||||
|
@ -267,6 +278,9 @@ func main() {
|
|||
a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
|
||||
Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
|
||||
|
||||
a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
|
||||
Default("0.9").FloatVar(&cfg.memlimitRatio)
|
||||
|
||||
webConfig := a.Flag(
|
||||
"web.config.file",
|
||||
"[EXPERIMENTAL] Path to configuration file that can enable TLS or authentication.",
|
||||
|
@ -407,6 +421,9 @@ func main() {
|
|||
serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
|
||||
Default("1m").SetValue(&cfg.resendDelay)
|
||||
|
||||
serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
|
||||
Default("4").Int64Var(&cfg.maxConcurrentEvals)
|
||||
|
||||
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
|
||||
Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
|
||||
|
||||
|
@ -434,7 +451,7 @@ func main() {
|
|||
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
|
||||
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
|
||||
|
||||
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, metadata-wal-records. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
|
||||
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, metadata-wal-records. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
|
||||
Default("").StringsVar(&cfg.featureList)
|
||||
|
||||
a.Flag("remote-write-format", "remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format)").
|
||||
|
@ -475,6 +492,11 @@ func main() {
|
|||
os.Exit(3)
|
||||
}
|
||||
|
||||
if cfg.memlimitRatio <= 0.0 || cfg.memlimitRatio > 1.0 {
|
||||
fmt.Fprintf(os.Stderr, "--auto-gomemlimit.ratio must be greater than 0 and less than or equal to 1.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
localStoragePath := cfg.serverStoragePath
|
||||
if agentMode {
|
||||
localStoragePath = cfg.agentStoragePath
|
||||
|
@ -638,9 +660,16 @@ func main() {
|
|||
level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if cfg.enableNewSDManager {
|
||||
{
|
||||
discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape"))
|
||||
discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
|
||||
if discMgr == nil {
|
||||
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
|
||||
os.Exit(1)
|
||||
|
@ -649,7 +678,7 @@ func main() {
|
|||
}
|
||||
|
||||
{
|
||||
discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify"))
|
||||
discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
|
||||
if discMgr == nil {
|
||||
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
|
||||
os.Exit(1)
|
||||
|
@ -658,7 +687,7 @@ func main() {
|
|||
}
|
||||
} else {
|
||||
{
|
||||
discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape"))
|
||||
discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
|
||||
if discMgr == nil {
|
||||
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
|
||||
os.Exit(1)
|
||||
|
@ -667,7 +696,7 @@ func main() {
		}

		{
-			discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify"))
+			discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
			if discMgr == nil {
				level.Error(logger).Log("msg", "failed to create a discovery manager notify")
				os.Exit(1)
@ -703,6 +732,20 @@ func main() {
		}
	}

+	if cfg.enableAutoGOMEMLIMIT {
+		if _, err := memlimit.SetGoMemLimitWithOpts(
+			memlimit.WithRatio(cfg.memlimitRatio),
+			memlimit.WithProvider(
+				memlimit.ApplyFallback(
+					memlimit.FromCgroup,
+					memlimit.FromSystem,
+				),
+			),
+		); err != nil {
+			level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
+		}
+	}
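For readers unfamiliar with the block above: the provider chain prefers the cgroup memory limit and falls back to total system memory. A minimal standalone sketch, assuming the github.com/KimMachineGun/automemlimit module this wiring appears to use (the 0.9 ratio is an illustrative value, not the Prometheus default):

package main

import (
	"fmt"

	"github.com/KimMachineGun/automemlimit/memlimit"
)

func main() {
	// Derive GOMEMLIMIT as 90% of the cgroup memory limit; if no cgroup
	// limit is set (e.g. running outside a container), fall back to the
	// host's total system memory.
	limit, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9),
		memlimit.WithProvider(
			memlimit.ApplyFallback(
				memlimit.FromCgroup,
				memlimit.FromSystem,
			),
		),
	)
	if err != nil {
		fmt.Println("could not derive a memory limit:", err)
		return
	}
	fmt.Printf("GOMEMLIMIT set to %d bytes\n", limit)
}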
	if !agentMode {
		opts := promql.EngineOpts{
			Logger: log.With(logger, "component", "query engine"),

@ -722,17 +765,19 @@ func main() {
		queryEngine = promql.NewEngine(opts)

		ruleManager = rules.NewManager(&rules.ManagerOptions{
-			Appendable:      fanoutStorage,
-			Queryable:       localStorage,
-			QueryFunc:       rules.EngineQueryFunc(queryEngine, fanoutStorage),
-			NotifyFunc:      rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
-			Context:         ctxRule,
-			ExternalURL:     cfg.web.ExternalURL,
-			Registerer:      prometheus.DefaultRegisterer,
-			Logger:          log.With(logger, "component", "rule manager"),
-			OutageTolerance: time.Duration(cfg.outageTolerance),
-			ForGracePeriod:  time.Duration(cfg.forGracePeriod),
-			ResendDelay:     time.Duration(cfg.resendDelay),
+			Appendable:             fanoutStorage,
+			Queryable:              localStorage,
+			QueryFunc:              rules.EngineQueryFunc(queryEngine, fanoutStorage),
+			NotifyFunc:             rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
+			Context:                ctxRule,
+			ExternalURL:            cfg.web.ExternalURL,
+			Registerer:             prometheus.DefaultRegisterer,
+			Logger:                 log.With(logger, "component", "rule manager"),
+			OutageTolerance:        time.Duration(cfg.outageTolerance),
+			ForGracePeriod:         time.Duration(cfg.forGracePeriod),
+			ResendDelay:            time.Duration(cfg.resendDelay),
+			MaxConcurrentEvals:     cfg.maxConcurrentEvals,
+			ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
		})
	}
@ -1655,6 +1700,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
		EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
		EnableNativeHistograms:         opts.EnableNativeHistograms,
		OutOfOrderTimeWindow:           opts.OutOfOrderTimeWindow,
+		EnableOverlappingCompaction:    true,
	}
}
@ -12,7 +12,6 @@
// limitations under the License.
//
//go:build !windows
-// +build !windows

package main
370
cmd/promtool/analyze.go
Normal file
@ -0,0 +1,370 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
var (
|
||||
errNotNativeHistogram = fmt.Errorf("not a native histogram")
|
||||
errNotEnoughData = fmt.Errorf("not enough data")
|
||||
|
||||
outputHeader = `Bucket stats for each histogram series over time
|
||||
------------------------------------------------
|
||||
First the min, avg, and max number of populated buckets, followed by the total
|
||||
number of buckets (only if different from the max number of populated buckets
|
||||
which is typical for classic but not native histograms).`
|
||||
outputFooter = `Aggregated bucket stats
|
||||
-----------------------
|
||||
Each line shows min/avg/max over the series above.`
|
||||
)
|
||||
|
||||
type QueryAnalyzeConfig struct {
|
||||
metricType string
|
||||
duration time.Duration
|
||||
time string
|
||||
matchers []string
|
||||
}
|
||||
|
||||
// run retrieves metrics that look like conventional histograms (i.e. have _bucket
|
||||
// suffixes) or native histograms, depending on metricType flag.
|
||||
func (c *QueryAnalyzeConfig) run(url *url.URL, roundtripper http.RoundTripper) error {
|
||||
if c.metricType != "histogram" {
|
||||
return fmt.Errorf("analyze type is %s, must be 'histogram'", c.metricType)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
api, err := newAPI(url, roundtripper, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var endTime time.Time
|
||||
if c.time != "" {
|
||||
endTime, err = parseTime(c.time)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing time '%s': %w", c.time, err)
|
||||
}
|
||||
} else {
|
||||
endTime = time.Now()
|
||||
}
|
||||
|
||||
return c.getStatsFromMetrics(ctx, api, endTime, os.Stdout, c.matchers)
|
||||
}
|
||||
|
||||
func (c *QueryAnalyzeConfig) getStatsFromMetrics(ctx context.Context, api v1.API, endTime time.Time, out io.Writer, matchers []string) error {
|
||||
fmt.Fprintf(out, "%s\n\n", outputHeader)
|
||||
metastatsNative := newMetaStatistics()
|
||||
metastatsClassic := newMetaStatistics()
|
||||
for _, matcher := range matchers {
|
||||
seriesSel := seriesSelector(matcher, c.duration)
|
||||
matrix, err := querySamples(ctx, api, seriesSel, endTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
matrices := make(map[string]model.Matrix)
|
||||
for _, series := range matrix {
|
||||
// We do not handle mixed types. If there are float values, we assume it is a
|
||||
// classic histogram, otherwise we assume it is a native histogram, and we
|
||||
// ignore series with errors if they do not match the expected type.
|
||||
if len(series.Values) == 0 {
|
||||
stats, err := calcNativeBucketStatistics(series)
|
||||
if err != nil {
|
||||
if errors.Is(err, errNotNativeHistogram) || errors.Is(err, errNotEnoughData) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, "- %s (native): %v\n", series.Metric, *stats)
|
||||
metastatsNative.update(stats)
|
||||
} else {
|
||||
lbs := model.LabelSet(series.Metric).Clone()
|
||||
if _, ok := lbs["le"]; !ok {
|
||||
continue
|
||||
}
|
||||
metricName := string(lbs[labels.MetricName])
|
||||
if !strings.HasSuffix(metricName, "_bucket") {
|
||||
continue
|
||||
}
|
||||
delete(lbs, labels.MetricName)
|
||||
delete(lbs, "le")
|
||||
key := formatSeriesName(metricName, lbs)
|
||||
matrices[key] = append(matrices[key], series)
|
||||
}
|
||||
}
|
||||
|
||||
for key, matrix := range matrices {
|
||||
stats, err := calcClassicBucketStatistics(matrix)
|
||||
if err != nil {
|
||||
if errors.Is(err, errNotEnoughData) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, "- %s (classic): %v\n", key, *stats)
|
||||
metastatsClassic.update(stats)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(out, "\n%s\n", outputFooter)
|
||||
if metastatsNative.Count() > 0 {
|
||||
fmt.Fprintf(out, "\nNative %s\n", metastatsNative)
|
||||
}
|
||||
if metastatsClassic.Count() > 0 {
|
||||
fmt.Fprintf(out, "\nClassic %s\n", metastatsClassic)
|
||||
}
|
||||
return nil
|
||||
}
|
||||

func seriesSelector(metricName string, duration time.Duration) string {
	builder := strings.Builder{}
	builder.WriteString(metricName)
	builder.WriteRune('[')
	builder.WriteString(duration.String())
	builder.WriteRune(']')
	return builder.String()
}

func formatSeriesName(metricName string, lbs model.LabelSet) string {
	builder := strings.Builder{}
	builder.WriteString(metricName)
	builder.WriteString(lbs.String())
	return builder.String()
}
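A quick illustration of what the two helpers above produce (hypothetical metric and labels; this snippet assumes it sits in the same package):

	sel := seriesSelector("http_request_duration_seconds_bucket", time.Hour)
	// sel == "http_request_duration_seconds_bucket[1h0m0s]"

	name := formatSeriesName("http_request_duration_seconds", model.LabelSet{"job": "api"})
	// name == `http_request_duration_seconds{job="api"}`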
func querySamples(ctx context.Context, api v1.API, query string, end time.Time) (model.Matrix, error) {
|
||||
values, _, err := api.Query(ctx, query, end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matrix, ok := values.(model.Matrix)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
|
||||
}
|
||||
|
||||
return matrix, nil
|
||||
}
|
||||
|
||||
// minPop/avgPop/maxPop is for the number of populated (non-zero) buckets.
|
||||
// total is the total number of buckets across all samples in the series,
|
||||
// populated or not.
|
||||
type statistics struct {
|
||||
minPop, maxPop, total int
|
||||
avgPop float64
|
||||
}
|
||||
|
||||
func (s statistics) String() string {
|
||||
if s.maxPop == s.total {
|
||||
return fmt.Sprintf("%d/%.3f/%d", s.minPop, s.avgPop, s.maxPop)
|
||||
}
|
||||
return fmt.Sprintf("%d/%.3f/%d/%d", s.minPop, s.avgPop, s.maxPop, s.total)
|
||||
}
|
||||
|
||||
func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) {
|
||||
numBuckets := len(matrix)
|
||||
|
||||
stats := &statistics{
|
||||
minPop: math.MaxInt,
|
||||
total: numBuckets,
|
||||
}
|
||||
|
||||
if numBuckets == 0 || len(matrix[0].Values) < 2 {
|
||||
return stats, errNotEnoughData
|
||||
}
|
||||
|
||||
numSamples := len(matrix[0].Values)
|
||||
|
||||
sortMatrix(matrix)
|
||||
|
||||
totalPop := 0
|
||||
for timeIdx := 0; timeIdx < numSamples; timeIdx++ {
|
||||
curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
countPop := 0
|
||||
for _, b := range curr {
|
||||
if b != 0 {
|
||||
countPop++
|
||||
}
|
||||
}
|
||||
|
||||
totalPop += countPop
|
||||
if stats.minPop > countPop {
|
||||
stats.minPop = countPop
|
||||
}
|
||||
if stats.maxPop < countPop {
|
||||
stats.maxPop = countPop
|
||||
}
|
||||
}
|
||||
stats.avgPop = float64(totalPop) / float64(numSamples)
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func sortMatrix(matrix model.Matrix) {
|
||||
sort.SliceStable(matrix, func(i, j int) bool {
|
||||
return getLe(matrix[i]) < getLe(matrix[j])
|
||||
})
|
||||
}
|
||||
|
||||
func getLe(series *model.SampleStream) float64 {
|
||||
lbs := model.LabelSet(series.Metric)
|
||||
le, _ := strconv.ParseFloat(string(lbs["le"]), 64)
|
||||
return le
|
||||
}
|
||||
|
||||
func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int, error) {
	counts := make([]int, numBuckets)
	if timeIdx >= len(matrix[0].Values) {
		// Just return zeroes instead of erroring out so we can get partial results.
		return counts, nil
	}
	counts[0] = int(matrix[0].Values[timeIdx].Value)
	for i, bucket := range matrix[1:] {
		if timeIdx >= len(bucket.Values) {
			// Just return zeroes instead of erroring out so we can get partial results.
			return counts, nil
		}
		curr := bucket.Values[timeIdx]
		prev := matrix[i].Values[timeIdx]
		// Assume the results are nicely aligned.
		if curr.Timestamp != prev.Timestamp {
			return counts, fmt.Errorf("matrix result is not time aligned")
		}
		counts[i+1] = int(curr.Value - prev.Value)
	}
	return counts, nil
}
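Since classic histogram buckets are cumulative, getBucketCountsAtTime turns them back into per-bucket counts by subtracting each bucket's value from the next one up. Using the exampleMatrix from the tests further down, at timeIdx 0:

	// Cumulative values, ordered by le after sortMatrix:
	//   le="0.5" -> 10, le="2" -> 25, le="10" -> 30, le="+Inf" -> 31
	// De-accumulated per-bucket counts:
	//   [10, 25-10, 30-25, 31-30] == [10, 15, 5, 1]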
type bucketBounds struct {
|
||||
boundaries int32
|
||||
upper, lower float64
|
||||
}
|
||||
|
||||
func makeBucketBounds(b *model.HistogramBucket) bucketBounds {
|
||||
return bucketBounds{
|
||||
boundaries: b.Boundaries,
|
||||
upper: float64(b.Upper),
|
||||
lower: float64(b.Lower),
|
||||
}
|
||||
}
|
||||
|
||||
func calcNativeBucketStatistics(series *model.SampleStream) (*statistics, error) {
|
||||
stats := &statistics{
|
||||
minPop: math.MaxInt,
|
||||
}
|
||||
|
||||
overall := make(map[bucketBounds]struct{})
|
||||
totalPop := 0
|
||||
if len(series.Histograms) == 0 {
|
||||
return nil, errNotNativeHistogram
|
||||
}
|
||||
if len(series.Histograms) == 1 {
|
||||
return nil, errNotEnoughData
|
||||
}
|
||||
for _, histogram := range series.Histograms {
|
||||
for _, bucket := range histogram.Histogram.Buckets {
|
||||
bb := makeBucketBounds(bucket)
|
||||
overall[bb] = struct{}{}
|
||||
}
|
||||
countPop := len(histogram.Histogram.Buckets)
|
||||
|
||||
totalPop += countPop
|
||||
if stats.minPop > countPop {
|
||||
stats.minPop = countPop
|
||||
}
|
||||
if stats.maxPop < countPop {
|
||||
stats.maxPop = countPop
|
||||
}
|
||||
}
|
||||
stats.avgPop = float64(totalPop) / float64(len(series.Histograms))
|
||||
stats.total = len(overall)
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
type distribution struct {
|
||||
min, max, count int
|
||||
avg float64
|
||||
}
|
||||
|
||||
func newDistribution() distribution {
|
||||
return distribution{
|
||||
min: math.MaxInt,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *distribution) update(num int) {
	if d.min > num {
		d.min = num
	}
	if d.max < num {
		d.max = num
	}
	d.count++
	d.avg += float64(num)/float64(d.count) - d.avg/float64(d.count)
}
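The avg line above is a streaming mean: once d.count has been incremented, avg' = avg + (num - avg)/count, so no separate running sum is needed. For example:

	d := newDistribution()
	d.update(4) // min=4 max=4 count=1 avg=4
	d.update(8) // avg = 4 + (8-4)/2 = 6; min=4 max=8 count=2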
func (d distribution) String() string {
|
||||
return fmt.Sprintf("%d/%.3f/%d", d.min, d.avg, d.max)
|
||||
}
|
||||
|
||||
type metaStatistics struct {
|
||||
minPop, avgPop, maxPop, total distribution
|
||||
}
|
||||
|
||||
func newMetaStatistics() *metaStatistics {
|
||||
return &metaStatistics{
|
||||
minPop: newDistribution(),
|
||||
avgPop: newDistribution(),
|
||||
maxPop: newDistribution(),
|
||||
total: newDistribution(),
|
||||
}
|
||||
}
|
||||
|
||||
func (ms metaStatistics) Count() int {
|
||||
return ms.minPop.count
|
||||
}
|
||||
|
||||
func (ms metaStatistics) String() string {
|
||||
if ms.maxPop == ms.total {
|
||||
return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop)
|
||||
}
|
||||
return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v\n- total: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop, ms.total)
|
||||
}
|
||||
|
||||
func (ms *metaStatistics) update(s *statistics) {
|
||||
ms.minPop.update(s.minPop)
|
||||
ms.avgPop.update(int(s.avgPop))
|
||||
ms.maxPop.update(s.maxPop)
|
||||
ms.total.update(s.total)
|
||||
}
|
170
cmd/promtool/analyze_test.go
Normal file
@ -0,0 +1,170 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
var (
|
||||
exampleMatrix = model.Matrix{
|
||||
&model.SampleStream{
|
||||
Metric: model.Metric{
|
||||
"le": "+Inf",
|
||||
},
|
||||
Values: []model.SamplePair{
|
||||
{
|
||||
Value: 31,
|
||||
Timestamp: 100,
|
||||
},
|
||||
{
|
||||
Value: 32,
|
||||
Timestamp: 200,
|
||||
},
|
||||
{
|
||||
Value: 40,
|
||||
Timestamp: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
&model.SampleStream{
|
||||
Metric: model.Metric{
|
||||
"le": "0.5",
|
||||
},
|
||||
Values: []model.SamplePair{
|
||||
{
|
||||
Value: 10,
|
||||
Timestamp: 100,
|
||||
},
|
||||
{
|
||||
Value: 11,
|
||||
Timestamp: 200,
|
||||
},
|
||||
{
|
||||
Value: 11,
|
||||
Timestamp: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
&model.SampleStream{
|
||||
Metric: model.Metric{
|
||||
"le": "10",
|
||||
},
|
||||
Values: []model.SamplePair{
|
||||
{
|
||||
Value: 30,
|
||||
Timestamp: 100,
|
||||
},
|
||||
{
|
||||
Value: 31,
|
||||
Timestamp: 200,
|
||||
},
|
||||
{
|
||||
Value: 37,
|
||||
Timestamp: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
&model.SampleStream{
|
||||
Metric: model.Metric{
|
||||
"le": "2",
|
||||
},
|
||||
Values: []model.SamplePair{
|
||||
{
|
||||
Value: 25,
|
||||
Timestamp: 100,
|
||||
},
|
||||
{
|
||||
Value: 26,
|
||||
Timestamp: 200,
|
||||
},
|
||||
{
|
||||
Value: 27,
|
||||
Timestamp: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
exampleMatrixLength = len(exampleMatrix)
|
||||
)
|
||||
|
||||
func init() {
|
||||
sortMatrix(exampleMatrix)
|
||||
}
|
||||
|
||||
func TestGetBucketCountsAtTime(t *testing.T) {
|
||||
cases := []struct {
|
||||
matrix model.Matrix
|
||||
length int
|
||||
timeIdx int
|
||||
expected []int
|
||||
}{
|
||||
{
|
||||
exampleMatrix,
|
||||
exampleMatrixLength,
|
||||
0,
|
||||
[]int{10, 15, 5, 1},
|
||||
},
|
||||
{
|
||||
exampleMatrix,
|
||||
exampleMatrixLength,
|
||||
1,
|
||||
[]int{11, 15, 5, 1},
|
||||
},
|
||||
{
|
||||
exampleMatrix,
|
||||
exampleMatrixLength,
|
||||
2,
|
||||
[]int{11, 16, 10, 3},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
|
||||
res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.expected, res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCalcClassicBucketStatistics(t *testing.T) {
|
||||
cases := []struct {
|
||||
matrix model.Matrix
|
||||
expected *statistics
|
||||
}{
|
||||
{
|
||||
exampleMatrix,
|
||||
&statistics{
|
||||
minPop: 4,
|
||||
avgPop: 4,
|
||||
maxPop: 4,
|
||||
total: 4,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
|
||||
res, err := calcClassicBucketStatistics(c.matrix)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.expected, res)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -35,9 +35,7 @@ import (
	"github.com/go-kit/log"
	"github.com/google/pprof/profile"
-	"github.com/prometheus/client_golang/api"
-	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
@ -185,6 +183,14 @@ func main() {
	queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
	queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()

+	queryAnalyzeCfg := &QueryAnalyzeConfig{}
+	queryAnalyzeCmd := queryCmd.Command("analyze", "Run queries against your Prometheus to analyze the usage pattern of certain metrics.")
+	queryAnalyzeCmd.Flag("server", "Prometheus server to query.").Required().URLVar(&serverURL)
+	queryAnalyzeCmd.Flag("type", "Type of metric: histogram.").Required().StringVar(&queryAnalyzeCfg.metricType)
+	queryAnalyzeCmd.Flag("duration", "Time frame to analyze.").Default("1h").DurationVar(&queryAnalyzeCfg.duration)
+	queryAnalyzeCmd.Flag("time", "Query time (RFC3339 or Unix timestamp), defaults to now.").StringVar(&queryAnalyzeCfg.time)
+	queryAnalyzeCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().StringsVar(&queryAnalyzeCfg.matchers)
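Put together, the new subcommand is invoked along these lines (server URL and matcher are placeholder values; the flags are exactly those registered above):

promtool query analyze \
  --server http://localhost:9090 \
  --type histogram \
  --duration 1h \
  --match 'http_request_duration_seconds_bucket'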

	pushCmd := app.Command("push", "Push to a Prometheus server.")
	pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
	pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")

@ -204,6 +210,7 @@ func main() {
		"test-rule-file",
		"The unit test file.",
	).Required().ExistingFiles()
+	testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()

	defaultDBPath := "data/"
	tsdbCmd := app.Command("tsdb", "Run tsdb commands.")
@ -230,7 +237,7 @@ func main() {
	dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
	dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
	dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
-	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String()
+	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

	importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
	importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@ -369,6 +376,7 @@ func main() {
				EnableNegativeOffset: true,
			},
			*testRulesRun,
+			*testRulesDiff,
			*testRulesFiles...),
		)
@ -390,6 +398,9 @@ func main() {
	case importRulesCmd.FullCommand():
		os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))

+	case queryAnalyzeCmd.FullCommand():
+		os.Exit(checkErr(queryAnalyzeCfg.run(serverURL, httpRoundTripper)))
+
	case documentationCmd.FullCommand():
		os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
@ -997,246 +1008,6 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
|
|||
return stats, total, nil
|
||||
}
|
||||
|
||||
// QueryInstant performs an instant query against a Prometheus server.
|
||||
func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
|
||||
if url.Scheme == "" {
|
||||
url.Scheme = "http"
|
||||
}
|
||||
config := api.Config{
|
||||
Address: url.String(),
|
||||
RoundTripper: roundTripper,
|
||||
}
|
||||
|
||||
// Create new client.
|
||||
c, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
eTime := time.Now()
|
||||
if evalTime != "" {
|
||||
eTime, err = parseTime(evalTime)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
api := v1.NewAPI(c)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
|
||||
cancel()
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printValue(val)
|
||||
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QueryRange performs a range query against a Prometheus server.
|
||||
func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
|
||||
if url.Scheme == "" {
|
||||
url.Scheme = "http"
|
||||
}
|
||||
config := api.Config{
|
||||
Address: url.String(),
|
||||
RoundTripper: roundTripper,
|
||||
}
|
||||
|
||||
if len(headers) > 0 {
|
||||
config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
|
||||
for key, value := range headers {
|
||||
req.Header.Add(key, value)
|
||||
}
|
||||
return roundTripper.RoundTrip(req)
|
||||
})
|
||||
}
|
||||
|
||||
// Create new client.
|
||||
c, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
var stime, etime time.Time
|
||||
|
||||
if end == "" {
|
||||
etime = time.Now()
|
||||
} else {
|
||||
etime, err = parseTime(end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
if start == "" {
|
||||
stime = etime.Add(-5 * time.Minute)
|
||||
} else {
|
||||
stime, err = parseTime(start)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
if !stime.Before(etime) {
|
||||
fmt.Fprintln(os.Stderr, "start time is not before end time")
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
if step == 0 {
|
||||
resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
|
||||
// Convert seconds to nanoseconds such that time.Duration parses correctly.
|
||||
step = time.Duration(resolution) * time.Second
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
api := v1.NewAPI(c)
|
||||
r := v1.Range{Start: stime, End: etime, Step: step}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printValue(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QuerySeries queries for a series against a Prometheus server.
|
||||
func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
|
||||
if url.Scheme == "" {
|
||||
url.Scheme = "http"
|
||||
}
|
||||
config := api.Config{
|
||||
Address: url.String(),
|
||||
RoundTripper: roundTripper,
|
||||
}
|
||||
|
||||
// Create new client.
|
||||
c, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
stime, etime, err := parseStartTimeAndEndTime(start, end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
api := v1.NewAPI(c)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printSeries(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QueryLabels queries for label values against a Prometheus server.
|
||||
func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
|
||||
if url.Scheme == "" {
|
||||
url.Scheme = "http"
|
||||
}
|
||||
config := api.Config{
|
||||
Address: url.String(),
|
||||
RoundTripper: roundTripper,
|
||||
}
|
||||
|
||||
// Create new client.
|
||||
c, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
stime, etime, err := parseStartTimeAndEndTime(start, end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
api := v1.NewAPI(c)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
|
||||
cancel()
|
||||
|
||||
for _, v := range warn {
|
||||
fmt.Fprintln(os.Stderr, "query warning:", v)
|
||||
}
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printLabelValues(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
func handleAPIError(err error) int {
|
||||
var apiErr *v1.Error
|
||||
if errors.As(err, &apiErr) && apiErr.Detail != "" {
|
||||
fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, "query error:", err)
|
||||
}
|
||||
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
|
||||
var (
|
||||
minTime = time.Now().Add(-9999 * time.Hour)
|
||||
maxTime = time.Now().Add(9999 * time.Hour)
|
||||
err error
|
||||
)
|
||||
|
||||
stime := minTime
|
||||
etime := maxTime
|
||||
|
||||
if start != "" {
|
||||
stime, err = parseTime(start)
|
||||
if err != nil {
|
||||
return stime, etime, fmt.Errorf("error parsing start time: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if end != "" {
|
||||
etime, err = parseTime(end)
|
||||
if err != nil {
|
||||
return stime, etime, fmt.Errorf("error parsing end time: %w", err)
|
||||
}
|
||||
}
|
||||
return stime, etime, nil
|
||||
}
|
||||
|
||||
func parseTime(s string) (time.Time, error) {
|
||||
if t, err := strconv.ParseFloat(s, 64); err == nil {
|
||||
s, ns := math.Modf(t)
|
||||
return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
|
||||
}
|
||||
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
|
||||
return t, nil
|
||||
}
|
||||
return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
|
||||
}
|
||||
|
||||
type endpointsGroup struct {
|
||||
urlToFilename map[string]string
|
||||
postProcess func(b []byte) ([]byte, error)
|
||||
|
@ -1390,15 +1161,12 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
		evalInterval:     evalInterval,
		maxBlockDuration: maxBlockDuration,
	}
-	client, err := api.NewClient(api.Config{
-		Address:      url.String(),
-		RoundTripper: roundTripper,
-	})
+	api, err := newAPI(url, roundTripper, nil)
	if err != nil {
		return fmt.Errorf("new api client error: %w", err)
	}

-	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client))
+	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
	errs := ruleImporter.loadGroups(ctx, files)
	for _, err := range errs {
		if err != nil {
251
cmd/promtool/query.go
Normal file
@ -0,0 +1,251 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/api"
|
||||
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
|
||||
)
|
||||
|
||||
func newAPI(url *url.URL, roundTripper http.RoundTripper, headers map[string]string) (v1.API, error) {
	if url.Scheme == "" {
		url.Scheme = "http"
	}
	config := api.Config{
		Address:      url.String(),
		RoundTripper: roundTripper,
	}

	if len(headers) > 0 {
		config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
			for key, value := range headers {
				req.Header.Add(key, value)
			}
			return roundTripper.RoundTrip(req)
		})
	}

	// Create new client.
	client, err := api.NewClient(config)
	if err != nil {
		return nil, err
	}

	api := v1.NewAPI(client)
	return api, nil
}
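Every Query* helper below funnels through this constructor. A minimal sketch of a direct caller, assuming it sits in the same package (the URL and header name are hypothetical):

	u, _ := url.Parse("http://localhost:9090")
	// Extra headers are folded into the RoundTripper, as shown above.
	apiClient, err := newAPI(u, http.DefaultTransport, map[string]string{"X-Scope-OrgID": "tenant-1"})
	if err != nil {
		// handle error
	}
	val, warnings, err := apiClient.Query(context.Background(), "up", time.Now())
	_, _, _ = val, warnings, err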
|
||||
|
||||
// QueryInstant performs an instant query against a Prometheus server.
|
||||
func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
|
||||
api, err := newAPI(url, roundTripper, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
eTime := time.Now()
|
||||
if evalTime != "" {
|
||||
eTime, err = parseTime(evalTime)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
|
||||
cancel()
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printValue(val)
|
||||
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QueryRange performs a range query against a Prometheus server.
|
||||
func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
|
||||
api, err := newAPI(url, roundTripper, headers)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
var stime, etime time.Time
|
||||
|
||||
if end == "" {
|
||||
etime = time.Now()
|
||||
} else {
|
||||
etime, err = parseTime(end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
if start == "" {
|
||||
stime = etime.Add(-5 * time.Minute)
|
||||
} else {
|
||||
stime, err = parseTime(start)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
}
|
||||
|
||||
if !stime.Before(etime) {
|
||||
fmt.Fprintln(os.Stderr, "start time is not before end time")
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
if step == 0 {
|
||||
resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
|
||||
// Convert seconds to nanoseconds such that time.Duration parses correctly.
|
||||
step = time.Duration(resolution) * time.Second
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
r := v1.Range{Start: stime, End: etime, Step: step}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printValue(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QuerySeries queries for a series against a Prometheus server.
|
||||
func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
|
||||
api, err := newAPI(url, roundTripper, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
stime, etime, err := parseStartTimeAndEndTime(start, end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printSeries(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
// QueryLabels queries for label values against a Prometheus server.
|
||||
func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
|
||||
api, err := newAPI(url, roundTripper, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "error creating API client:", err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
stime, etime, err := parseStartTimeAndEndTime(start, end)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
// Run query against client.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
|
||||
cancel()
|
||||
|
||||
for _, v := range warn {
|
||||
fmt.Fprintln(os.Stderr, "query warning:", v)
|
||||
}
|
||||
if err != nil {
|
||||
return handleAPIError(err)
|
||||
}
|
||||
|
||||
p.printLabelValues(val)
|
||||
return successExitCode
|
||||
}
|
||||
|
||||
func handleAPIError(err error) int {
|
||||
var apiErr *v1.Error
|
||||
if errors.As(err, &apiErr) && apiErr.Detail != "" {
|
||||
fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
|
||||
} else {
|
||||
fmt.Fprintln(os.Stderr, "query error:", err)
|
||||
}
|
||||
|
||||
return failureExitCode
|
||||
}
|
||||
|
||||
func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
|
||||
var (
|
||||
minTime = time.Now().Add(-9999 * time.Hour)
|
||||
maxTime = time.Now().Add(9999 * time.Hour)
|
||||
err error
|
||||
)
|
||||
|
||||
stime := minTime
|
||||
etime := maxTime
|
||||
|
||||
if start != "" {
|
||||
stime, err = parseTime(start)
|
||||
if err != nil {
|
||||
return stime, etime, fmt.Errorf("error parsing start time: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if end != "" {
|
||||
etime, err = parseTime(end)
|
||||
if err != nil {
|
||||
return stime, etime, fmt.Errorf("error parsing end time: %w", err)
|
||||
}
|
||||
}
|
||||
return stime, etime, nil
|
||||
}
|
||||
|
||||
func parseTime(s string) (time.Time, error) {
	if t, err := strconv.ParseFloat(s, 64); err == nil {
		s, ns := math.Modf(t)
		return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
	}
	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
		return t, nil
	}
	return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
}
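parseTime accepts either a (possibly fractional) Unix timestamp or RFC 3339, tried in that order; both calls below yield the same instant (illustrative values):

	t1, _ := parseTime("1700000000")
	t2, _ := parseTime("2023-11-14T22:13:20Z")
	// t1.Equal(t2) == true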
|
|
@ -78,12 +78,25 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
	defer cancel()

	for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
-		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Registerer: registerer})
+		reg := prometheus.NewRegistry()
+		refreshMetrics := discovery.NewRefreshMetrics(reg)
+		metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+		err := metrics.Register()
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "Could not register service discovery metrics", err)
+			return failureExitCode
+		}
+
+		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Metrics: metrics})
		if err != nil {
			fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
			return failureExitCode
		}
-		go d.Run(ctx, targetGroupChan)
+		go func() {
+			d.Run(ctx, targetGroupChan)
+			metrics.Unregister()
+			refreshMetrics.Unregister()
+		}()
	}

	var targetGroups []*targetgroup.Group
15
cmd/promtool/testdata/dump-test-1.prom
vendored
Normal file
@ -0,0 +1,15 @@
{__name__="heavy_metric", foo="bar"} 5 0
{__name__="heavy_metric", foo="bar"} 4 60000
{__name__="heavy_metric", foo="bar"} 3 120000
{__name__="heavy_metric", foo="bar"} 2 180000
{__name__="heavy_metric", foo="bar"} 1 240000
{__name__="heavy_metric", foo="foo"} 5 0
{__name__="heavy_metric", foo="foo"} 4 60000
{__name__="heavy_metric", foo="foo"} 3 120000
{__name__="heavy_metric", foo="foo"} 2 180000
{__name__="heavy_metric", foo="foo"} 1 240000
{__name__="metric", baz="abc", foo="bar"} 1 0
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000
{__name__="metric", baz="abc", foo="bar"} 4 180000
{__name__="metric", baz="abc", foo="bar"} 5 240000
10
cmd/promtool/testdata/dump-test-2.prom
vendored
Normal file
@ -0,0 +1,10 @@
{__name__="heavy_metric", foo="foo"} 5 0
{__name__="heavy_metric", foo="foo"} 4 60000
{__name__="heavy_metric", foo="foo"} 3 120000
{__name__="heavy_metric", foo="foo"} 2 180000
{__name__="heavy_metric", foo="foo"} 1 240000
{__name__="metric", baz="abc", foo="bar"} 1 0
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000
{__name__="metric", baz="abc", foo="bar"} 4 180000
{__name__="metric", baz="abc", foo="bar"} 5 240000
2
cmd/promtool/testdata/dump-test-3.prom
vendored
Normal file
@ -0,0 +1,2 @@
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000
@ -667,7 +667,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
		it := fhchk.Iterator(nil)
		bucketCount := 0
		for it.Next() == chunkenc.ValFloatHistogram {
-			_, f := it.AtFloatHistogram()
+			_, f := it.AtFloatHistogram(nil)
			bucketCount += len(f.PositiveBuckets)
			bucketCount += len(f.NegativeBuckets)
		}

@ -682,7 +682,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
		it := hchk.Iterator(nil)
		bucketCount := 0
		for it.Next() == chunkenc.ValHistogram {
-			_, f := it.AtHistogram()
+			_, f := it.AtHistogram(nil)
			bucketCount += len(f.PositiveBuckets)
			bucketCount += len(f.NegativeBuckets)
		}
@ -706,7 +706,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
	return nil
}

-func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
+func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string) (err error) {
	db, err := tsdb.OpenDBReadOnly(path, nil)
	if err != nil {
		return err

@ -720,11 +720,21 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
	}
	defer q.Close()

-	matchers, err := parser.ParseMetricSelector(match)
+	matcherSets, err := parser.ParseMetricSelectors(match)
	if err != nil {
		return err
	}
-	ss := q.Select(ctx, false, nil, matchers...)
+
+	var ss storage.SeriesSet
+	if len(matcherSets) > 1 {
+		var sets []storage.SeriesSet
+		for _, mset := range matcherSets {
+			sets = append(sets, q.Select(ctx, true, nil, mset...))
+		}
+		ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
+	} else {
+		ss = q.Select(ctx, false, nil, matcherSets[0]...)
+	}
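With --match now repeatable, overlapping selectors are deduplicated through the merged series set above rather than dumped twice. A hypothetical invocation (paths and selectors are placeholders):

promtool tsdb dump \
  --match '{__name__="heavy_metric",foo="foo"}' \
  --match '{__name__="metric"}' \
  data/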

	for ss.Next() {
		series := ss.At()

@ -735,11 +745,11 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
			fmt.Printf("%s %g %d\n", lbs, val, ts)
		}
		for it.Next() == chunkenc.ValFloatHistogram {
-			ts, fh := it.AtFloatHistogram()
+			ts, fh := it.AtFloatHistogram(nil)
			fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
		}
		for it.Next() == chunkenc.ValHistogram {
-			ts, h := it.AtHistogram()
+			ts, h := it.AtHistogram(nil)
			fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
		}
		if it.Err() != nil {
@ -14,9 +14,18 @@
package main

import (
+	"bytes"
+	"context"
+	"io"
+	"math"
+	"os"
+	"runtime"
+	"strings"
	"testing"

	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/promql"
)

func TestGenerateBucket(t *testing.T) {
@ -41,3 +50,101 @@ func TestGenerateBucket(t *testing.T) {
|
|||
require.Equal(t, tc.step, step)
|
||||
}
|
||||
}
|
||||
|
||||
// getDumpedSamples dumps samples and returns them.
|
||||
func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string) string {
|
||||
t.Helper()
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
err := dumpSamples(
|
||||
context.Background(),
|
||||
path,
|
||||
mint,
|
||||
maxt,
|
||||
match,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
w.Close()
|
||||
os.Stdout = oldStdout
|
||||
|
||||
var buf bytes.Buffer
|
||||
io.Copy(&buf, r)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func TestTSDBDump(t *testing.T) {
|
||||
storage := promql.LoadedStorage(t, `
|
||||
load 1m
|
||||
metric{foo="bar", baz="abc"} 1 2 3 4 5
|
||||
heavy_metric{foo="bar"} 5 4 3 2 1
|
||||
heavy_metric{foo="foo"} 5 4 3 2 1
|
||||
`)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
mint int64
|
||||
maxt int64
|
||||
match []string
|
||||
expectedDump string
|
||||
}{
|
||||
{
|
||||
name: "default match",
|
||||
mint: math.MinInt64,
|
||||
maxt: math.MaxInt64,
|
||||
match: []string{"{__name__=~'(?s:.*)'}"},
|
||||
expectedDump: "testdata/dump-test-1.prom",
|
||||
},
|
||||
{
|
||||
name: "same matcher twice",
|
||||
mint: math.MinInt64,
|
||||
maxt: math.MaxInt64,
|
||||
match: []string{"{foo=~'.+'}", "{foo=~'.+'}"},
|
||||
expectedDump: "testdata/dump-test-1.prom",
|
||||
},
|
||||
{
|
||||
name: "no duplication",
|
||||
mint: math.MinInt64,
|
||||
maxt: math.MaxInt64,
|
||||
match: []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
|
||||
expectedDump: "testdata/dump-test-1.prom",
|
||||
},
|
||||
{
|
||||
name: "well merged",
|
||||
mint: math.MinInt64,
|
||||
maxt: math.MaxInt64,
|
||||
match: []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
|
||||
expectedDump: "testdata/dump-test-1.prom",
|
||||
},
|
||||
{
|
||||
name: "multi matchers",
|
||||
mint: math.MinInt64,
|
||||
maxt: math.MaxInt64,
|
||||
match: []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
|
||||
expectedDump: "testdata/dump-test-2.prom",
|
||||
},
|
||||
{
|
||||
name: "with reduced mint and maxt",
|
||||
mint: int64(60000),
|
||||
maxt: int64(120000),
|
||||
match: []string{"{__name__='metric'}"},
|
||||
expectedDump: "testdata/dump-test-3.prom",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match)
|
||||
expectedMetrics, err := os.ReadFile(tt.expectedDump)
|
||||
require.NoError(t, err)
|
||||
if strings.Contains(runtime.GOOS, "windows") {
|
||||
// We use "/n" while dumping on windows as well.
|
||||
expectedMetrics = bytes.ReplaceAll(expectedMetrics, []byte("\r\n"), []byte("\n"))
|
||||
}
|
||||
// even though in case of one matcher samples are not sorted, the order in the cases above should stay the same.
|
||||
require.Equal(t, string(expectedMetrics), dumpedMetrics)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ package main

import (
	"context"
+	"encoding/json"
	"errors"
	"fmt"
	"os"

@ -27,6 +28,7 @@ import (
	"github.com/go-kit/log"
	"github.com/grafana/regexp"
+	"github.com/nsf/jsondiff"
	"github.com/prometheus/common/model"
	"gopkg.in/yaml.v2"
@ -40,7 +42,7 @@ import (

// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files ...string) int {
+func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
	failed := false

	var run *regexp.Regexp

@ -49,7 +51,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
	}

	for _, f := range files {
-		if errs := ruleUnitTest(f, queryOpts, run); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
			fmt.Fprintln(os.Stderr, " FAILED:")
			for _, e := range errs {
				fmt.Fprintln(os.Stderr, e.Error())
@ -67,7 +69,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
	return successExitCode
}

-func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp) []error {
+func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
	fmt.Println("Unit Testing: ", filename)

	b, err := os.ReadFile(filename)

@ -109,7 +111,7 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.
		if t.Interval == 0 {
			t.Interval = unitTestInp.EvaluationInterval
		}
-		ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...)
+		ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
		if ers != nil {
			errs = append(errs, ers...)
		}

@ -173,7 +175,7 @@ type testGroup struct {
}

// test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) []error {
	// Setup testing suite.
	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
	if err != nil {
@ -345,8 +347,44 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
			}
			expString := indentLines(expAlerts.String(), " ")
			gotString := indentLines(gotAlerts.String(), " ")
-			errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
-				testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
+			if diffFlag {
+				// If empty, populates an empty value
+				if gotAlerts.Len() == 0 {
+					gotAlerts = append(gotAlerts, labelAndAnnotation{
+						Labels:      labels.Labels{},
+						Annotations: labels.Labels{},
+					})
+				}
+				// If empty, populates an empty value
+				if expAlerts.Len() == 0 {
+					expAlerts = append(expAlerts, labelAndAnnotation{
+						Labels:      labels.Labels{},
+						Annotations: labels.Labels{},
+					})
+				}
+
+				diffOpts := jsondiff.DefaultConsoleOptions()
+				expAlertsJSON, err := json.Marshal(expAlerts)
+				if err != nil {
+					errs = append(errs, fmt.Errorf("error marshaling expected %s alert: [%s]", tg.TestGroupName, err.Error()))
+					continue
+				}
+
+				gotAlertsJSON, err := json.Marshal(gotAlerts)
+				if err != nil {
+					errs = append(errs, fmt.Errorf("error marshaling received %s alert: [%s]", tg.TestGroupName, err.Error()))
+					continue
+				}
+
+				res, diff := jsondiff.Compare(expAlertsJSON, gotAlertsJSON, &diffOpts)
+				if res != jsondiff.FullMatch {
+					errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n diff: %v",
+						testName, testcase.Alertname, testcase.EvalTime.String(), indentLines(diff, " ")))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
+					testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
+			}
		}
	}
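A minimal standalone illustration of the comparison primitive used above (github.com/nsf/jsondiff; the two JSON payloads are made-up stand-ins for the marshaled alerts):

	opts := jsondiff.DefaultConsoleOptions()
	res, diff := jsondiff.Compare(
		[]byte(`[{"Labels":{"severity":"page"}}]`),
		[]byte(`[{"Labels":{"severity":"warn"}}]`),
		&opts,
	)
	if res != jsondiff.FullMatch {
		fmt.Println(diff) // colored, line-by-line difference
	}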
@ -125,7 +125,7 @@ func TestRulesUnitTest(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			if got := RulesUnitTest(tt.queryOpts, nil, tt.args.files...); got != tt.want {
+			if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
			}
		})

@ -178,7 +178,7 @@ func TestRulesUnitTestRun(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			if got := RulesUnitTest(tt.queryOpts, tt.args.run, tt.args.files...); got != tt.want {
+			if got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...); got != tt.want {
				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
			}
		})
@ -610,9 +610,12 @@ type ScrapeConfig struct {
	// More than this label value length post metric-relabeling will cause the
	// scrape to fail. 0 means no limit.
	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
-	// More than this many buckets in a native histogram will cause the scrape to
-	// fail.
+	// If there are more than this many buckets in a native histogram,
+	// buckets will be merged to stay within the limit.
	NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
+	// If the growth factor of one bucket to the next is smaller than this,
+	// buckets will be merged to increase the factor sufficiently.
+	NativeHistogramMinBucketFactor float64 `yaml:"native_histogram_min_bucket_factor,omitempty"`
	// Keep no more than this many dropped targets per job.
	// 0 means no limit.
	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`

@ -1124,6 +1127,9 @@ type QueueConfig struct {
	MinBackoff       model.Duration `yaml:"min_backoff,omitempty"`
	MaxBackoff       model.Duration `yaml:"max_backoff,omitempty"`
	RetryOnRateLimit bool           `yaml:"retry_on_http_429,omitempty"`
+
+	// Samples older than the limit will be dropped.
+	SampleAgeLimit model.Duration `yaml:"sample_age_limit,omitempty"`
}

// MetadataConfig is the configuration for sending metadata to remote
@ -12,7 +12,6 @@
// limitations under the License.

//go:build !windows
-// +build !windows

package config
@ -47,7 +47,7 @@
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
-expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
+expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,

@ -69,7 +69,7 @@
<script>
new PromConsole.Graph({
node: document.querySelector("#cpuGraph"),
-expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
+expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
@@ -21,7 +21,7 @@
 <tr>
   <td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
   <td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance)(irate(node_cpu_seconds_total{job='node',mode='idle',instance='%s'}[5m])))" .Labels.instance) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
   <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
 </tr>
 {{ else }}
@@ -97,12 +97,19 @@ type EC2SDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &ec2Metrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the EC2 Config.
 func (*EC2SDConfig) Name() string { return "ec2" }

 // NewDiscoverer returns a Discoverer for the EC2 Config.
 func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewEC2Discovery(c, opts.Logger, opts.Registerer), nil
+	return NewEC2Discovery(c, opts.Logger, opts.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
@@ -148,7 +155,12 @@ type EC2Discovery struct {
 }

 // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
-func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Registerer) *EC2Discovery {
+func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
+	m, ok := metrics.(*ec2Metrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
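The constructor change above introduces a pattern repeated by every SD in this sync: the generic discovery.DiscovererMetrics handed in by the caller is down-cast to the SD's concrete metrics struct, and construction fails fast on a mismatch. A self-contained sketch of the pattern with hypothetical names (ExampleDiscovery, exampleMetrics are illustrations, not code from this diff):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery"
)

// exampleMetrics stands in for the per-SD metrics type that this SD's
// own NewDiscovererMetrics would have produced earlier.
type exampleMetrics struct{ refreshMetrics discovery.RefreshMetricsInstantiator }

func (m *exampleMetrics) Register() error { return nil }
func (m *exampleMetrics) Unregister()     {}

type ExampleDiscovery struct{ metrics *exampleMetrics }

// NewExampleDiscovery mirrors the down-cast performed by each SD
// constructor in this diff.
func NewExampleDiscovery(metrics discovery.DiscovererMetrics) (*ExampleDiscovery, error) {
	m, ok := metrics.(*exampleMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
	}
	return &ExampleDiscovery{metrics: m}, nil
}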
@@ -158,14 +170,14 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Regist
 	}
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "ec2",
-			Interval: time.Duration(d.cfg.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "ec2",
+			Interval:            time.Duration(d.cfg.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
-	return d
+	return d, nil
 }

 func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
@@ -80,12 +80,19 @@ type LightsailSDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &lightsailMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Lightsail Config.
 func (*LightsailSDConfig) Name() string { return "lightsail" }

 // NewDiscoverer returns a Discoverer for the Lightsail Config.
 func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewLightsailDiscovery(c, opts.Logger, opts.Registerer), nil
+	return NewLightsailDiscovery(c, opts.Logger, opts.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
@@ -122,23 +129,29 @@ type LightsailDiscovery struct {
 }

 // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, reg prometheus.Registerer) *LightsailDiscovery {
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
+	m, ok := metrics.(*lightsailMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}

 	d := &LightsailDiscovery{
 		cfg: conf,
 	}
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "lightsail",
-			Interval: time.Duration(d.cfg.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "lightsail",
+			Interval:            time.Duration(d.cfg.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
-	return d
+	return d, nil
 }

 func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
discovery/aws/metrics_ec2.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+type ec2Metrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*ec2Metrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (m *ec2Metrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *ec2Metrics) Unregister() {}
discovery/aws/metrics_lightsail.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+type lightsailMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*lightsailMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (m *lightsailMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *lightsailMetrics) Unregister() {}
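Both AWS metrics types above have no-op Register/Unregister: their only instruments are the shared refresh failure/duration metrics obtained through the instantiator. A hedged sketch of the consuming side, assumed from the MetricsInstantiator option and the RefreshMetricsInstantiator interface added later in this diff — this is not the literal refresh package code:

package sketch

import (
	"time"

	"github.com/prometheus/prometheus/discovery"
)

// runLoop sketches how the refresh loop presumably uses the instantiator
// passed in via refresh.Options.MetricsInstantiator.
func runLoop(rmi discovery.RefreshMetricsInstantiator, mech string, refreshF func() error) {
	m := rmi.Instantiate(mech) // per-mechanism Failures counter and Duration observer
	for i := 0; i < 3; i++ {   // stand-in for the real interval ticker
		start := time.Now()
		err := refreshF()
		m.Duration.Observe(time.Since(start).Seconds())
		if err != nil {
			m.Failures.Inc()
		}
	}
}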
@@ -120,12 +120,17 @@ type SDConfig struct {
 	HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "azure" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 func validateAuthParam(param, name string) error {
@@ -168,45 +173,39 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {

 type Discovery struct {
 	*refresh.Discovery
-	logger        log.Logger
-	cfg           *SDConfig
-	port          int
-	cache         *cache.Cache[string, *armnetwork.Interface]
-	failuresCount prometheus.Counter
-	cacheHitCount prometheus.Counter
+	logger  log.Logger
+	cfg     *SDConfig
+	port    int
+	cache   *cache.Cache[string, *armnetwork.Interface]
+	metrics *azureMetrics
 }

 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
-func NewDiscovery(cfg *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*azureMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 	l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
 	d := &Discovery{
-		cfg:    cfg,
-		port:   cfg.Port,
-		logger: logger,
-		cache:  l,
-		failuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_azure_failures_total",
-				Help: "Number of Azure service discovery refresh failures.",
-			}),
-		cacheHitCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_azure_cache_hit_total",
-				Help: "Number of cache hit during refresh.",
-			}),
+		cfg:     cfg,
+		port:    cfg.Port,
+		logger:  logger,
+		cache:   l,
+		metrics: m,
 	}

 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "azure",
-			Interval: time.Duration(cfg.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
-			Metrics:  []prometheus.Collector{d.failuresCount, d.cacheHitCount},
+			Logger:              logger,
+			Mech:                "azure",
+			Interval:            time.Duration(cfg.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
@@ -333,14 +332,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

 	client, err := createAzureClient(*d.cfg)
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("could not create Azure client: %w", err)
 	}
 	client.logger = d.logger

 	machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("could not get virtual machines: %w", err)
 	}
@@ -349,14 +348,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	// Load the vms managed by scale sets.
 	scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err)
 	}

 	for _, scaleSet := range scaleSets {
 		scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet)
 		if err != nil {
-			d.failuresCount.Inc()
+			d.metrics.failuresCount.Inc()
 			return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err)
 		}
 		machines = append(machines, scaleSetVms...)
@@ -407,7 +406,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		var networkInterface *armnetwork.Interface
 		if v, ok := d.getFromCache(nicID); ok {
 			networkInterface = v
-			d.cacheHitCount.Add(1)
+			d.metrics.cacheHitCount.Add(1)
 		} else {
 			if vm.ScaleSet == "" {
 				networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
@@ -420,10 +419,20 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 				} else {
 					ch <- target{labelSet: nil, err: err}
 				}
 				// Get out of this routine because we cannot continue without a network interface.
 				return
 			}
 			d.addToCache(nicID, networkInterface)
+			} else {
+				networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
+				if err != nil {
+					if errors.Is(err, errorNotFound) {
+						level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+					} else {
+						ch <- target{labelSet: nil, err: err}
+					}
+					// Get out of this routine because we cannot continue without a network interface.
+					return
+				}
+				d.addToCache(nicID, networkInterface)
 			}
 		}

 		if networkInterface.Properties == nil {
@@ -470,7 +479,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	var tg targetgroup.Group
 	for tgt := range ch {
 		if tgt.err != nil {
-			d.failuresCount.Inc()
+			d.metrics.failuresCount.Inc()
 			return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
 		}
 		if tgt.labelSet != nil {
discovery/azure/metrics.go (new file, 64 lines)
@@ -0,0 +1,64 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package azure
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*azureMetrics)(nil)
+
+type azureMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+
+	failuresCount prometheus.Counter
+	cacheHitCount prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &azureMetrics{
+		refreshMetrics: rmi,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_azure_failures_total",
+				Help: "Number of Azure service discovery refresh failures.",
+			}),
+		cacheHitCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_azure_cache_hit_total",
+				Help: "Number of cache hit during refresh.",
+			}),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.failuresCount,
+		m.cacheHitCount,
+	})
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *azureMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *azureMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}
@@ -119,12 +119,17 @@ type SDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "consul" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -161,27 +166,28 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // Discovery retrieves target information from a Consul server
 // and updates them via watches.
 type Discovery struct {
-	client              *consul.Client
-	clientDatacenter    string
-	clientNamespace     string
-	clientPartition     string
-	tagSeparator        string
-	watchedServices     []string // Set of services which will be discovered.
-	watchedTags         []string // Tags used to filter instances of a service.
-	watchedNodeMeta     map[string]string
-	allowStale          bool
-	refreshInterval     time.Duration
-	finalizer           func()
-	logger              log.Logger
-	rpcFailuresCount    prometheus.Counter
-	rpcDuration         *prometheus.SummaryVec
-	servicesRPCDuration prometheus.Observer
-	serviceRPCDuration  prometheus.Observer
-	metricRegisterer    discovery.MetricRegisterer
+	client           *consul.Client
+	clientDatacenter string
+	clientNamespace  string
+	clientPartition  string
+	tagSeparator     string
+	watchedServices  []string // Set of services which will be discovered.
+	watchedTags      []string // Tags used to filter instances of a service.
+	watchedNodeMeta  map[string]string
+	allowStale       bool
+	refreshInterval  time.Duration
+	finalizer        func()
+	logger           log.Logger
+	metrics          *consulMetrics
 }

 // NewDiscovery returns a new Discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*consulMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -219,35 +225,9 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 		clientPartition: conf.Partition,
 		finalizer:       wrapper.CloseIdleConnections,
 		logger:          logger,
-		rpcFailuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Namespace: namespace,
-				Name:      "sd_consul_rpc_failures_total",
-				Help:      "The number of Consul RPC call failures.",
-			}),
-		rpcDuration: prometheus.NewSummaryVec(
-			prometheus.SummaryOpts{
-				Namespace:  namespace,
-				Name:       "sd_consul_rpc_duration_seconds",
-				Help:       "The duration of a Consul RPC call in seconds.",
-				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-			},
-			[]string{"endpoint", "call"},
-		),
+		metrics:         m,
 	}
-
-	cd.metricRegisterer = discovery.NewMetricRegisterer(
-		reg,
-		[]prometheus.Collector{
-			cd.rpcFailuresCount,
-			cd.rpcDuration,
-		},
-	)
-
-	// Initialize metric vectors.
-	cd.servicesRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "services")
-	cd.serviceRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "service")

 	return cd, nil
 }
@@ -303,7 +283,7 @@ func (d *Discovery) getDatacenter() error {
 	info, err := d.client.Agent().Self()
 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
-		d.rpcFailuresCount.Inc()
+		d.metrics.rpcFailuresCount.Inc()
 		return err
 	}
@@ -344,13 +324,6 @@ func (d *Discovery) initialize(ctx context.Context) {

 // Run implements the Discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-	err := d.metricRegisterer.RegisterMetrics()
-	if err != nil {
-		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-		return
-	}
-	defer d.metricRegisterer.UnregisterMetrics()
-
 	if d.finalizer != nil {
 		defer d.finalizer()
 	}
@@ -399,7 +372,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
 	t0 := time.Now()
 	srvs, meta, err := catalog.Services(opts.WithContext(ctx))
 	elapsed := time.Since(t0)
-	d.servicesRPCDuration.Observe(elapsed.Seconds())
+	d.metrics.servicesRPCDuration.Observe(elapsed.Seconds())

 	// Check the context before in order to exit early.
 	select {
@@ -410,7 +383,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.

 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
-		d.rpcFailuresCount.Inc()
+		d.metrics.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return
 	}
@@ -490,8 +463,8 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
 		},
 		tagSeparator:       d.tagSeparator,
 		logger:             d.logger,
-		rpcFailuresCount:   d.rpcFailuresCount,
-		serviceRPCDuration: d.serviceRPCDuration,
+		rpcFailuresCount:   d.metrics.rpcFailuresCount,
+		serviceRPCDuration: d.metrics.serviceRPCDuration,
 	}

 	go func() {
@@ -29,6 +29,7 @@ import (
 	"go.uber.org/goleak"
 	"gopkg.in/yaml.v2"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -36,11 +37,25 @@ func TestMain(m *testing.M) {
 	goleak.VerifyTestMain(m)
 }

+// TODO: Add ability to unregister metrics?
+func NewTestMetrics(t *testing.T, conf discovery.Config, reg prometheus.Registerer) discovery.DiscovererMetrics {
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	require.NoError(t, refreshMetrics.Register())
+
+	metrics := conf.NewDiscovererMetrics(prometheus.NewRegistry(), refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	return metrics
+}
+
 func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
 		Services: []string{"configuredServiceName"},
 	}
-	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
+
+	metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+	consulDiscovery, err := NewDiscovery(conf, nil, metrics)
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -57,7 +72,10 @@ func TestConfiguredServiceWithTag(t *testing.T) {
 		Services:    []string{"configuredServiceName"},
 		ServiceTags: []string{"http"},
 	}
-	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
+
+	metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+	consulDiscovery, err := NewDiscovery(conf, nil, metrics)
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -152,7 +170,9 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 	}

 	for _, tc := range cases {
-		consulDiscovery, err := NewDiscovery(tc.conf, nil, prometheus.NewRegistry())
+		metrics := NewTestMetrics(t, tc.conf, prometheus.NewRegistry())
+
+		consulDiscovery, err := NewDiscovery(tc.conf, nil, metrics)
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}
@@ -160,13 +180,15 @@ func TestConfiguredServiceWithTags(t *testing.T) {
 		if ret != tc.shouldWatch {
 			t.Errorf("Expected should watch? %t, got %t. Watched service and tags: %s %+v, input was %s %+v", tc.shouldWatch, ret, tc.conf.Services, tc.conf.ServiceTags, tc.serviceName, tc.serviceTags)
 		}
-
 	}
 }

 func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
-	consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
+
+	metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+	consulDiscovery, err := NewDiscovery(conf, nil, metrics)
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -263,7 +285,10 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {

 func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
 	logger := log.NewNopLogger()
-	d, err := NewDiscovery(config, logger, prometheus.NewRegistry())
+
+	metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
+
+	d, err := NewDiscovery(config, logger, metrics)
 	require.NoError(t, err)
 	return d
 }
discovery/consul/metrics.go (new file, 73 lines)
@@ -0,0 +1,73 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consul
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*consulMetrics)(nil)
+
+type consulMetrics struct {
+	rpcFailuresCount prometheus.Counter
+	rpcDuration      *prometheus.SummaryVec
+
+	servicesRPCDuration prometheus.Observer
+	serviceRPCDuration  prometheus.Observer
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &consulMetrics{
+		rpcFailuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_consul_rpc_failures_total",
+				Help:      "The number of Consul RPC call failures.",
+			}),
+		rpcDuration: prometheus.NewSummaryVec(
+			prometheus.SummaryOpts{
+				Namespace:  namespace,
+				Name:       "sd_consul_rpc_duration_seconds",
+				Help:       "The duration of a Consul RPC call in seconds.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			},
+			[]string{"endpoint", "call"},
+		),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.rpcFailuresCount,
+		m.rpcDuration,
+	})
+
+	// Initialize metric vectors.
+	m.servicesRPCDuration = m.rpcDuration.WithLabelValues("catalog", "services")
+	m.serviceRPCDuration = m.rpcDuration.WithLabelValues("catalog", "service")
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *consulMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *consulMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}
@@ -63,6 +63,13 @@ func init() {
 	discovery.RegisterConfig(&SDConfig{})
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &digitaloceanMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // SDConfig is the configuration for DigitalOcean based service discovery.
 type SDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
@@ -76,7 +83,7 @@ func (*SDConfig) Name() string { return "digitalocean" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -104,7 +111,12 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*digitaloceanMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		port: conf.Port,
 	}
@@ -127,11 +139,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)

 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "digitalocean",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "digitalocean",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery"
 )

 type DigitalOceanSDTestSuite struct {
@@ -47,7 +49,15 @@ func TestDigitalOceanSDRefresh(t *testing.T) {

 	cfg := DefaultSDConfig
 	cfg.HTTPClientConfig.BearerToken = tokenID
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)
discovery/digitalocean/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digitalocean
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*digitaloceanMetrics)(nil)
+
+type digitaloceanMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *digitaloceanMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *digitaloceanMetrics) Unregister() {}
discovery/discoverer_metrics_noop.go (new file, 28 lines)
@@ -0,0 +1,28 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+// Create a dummy metrics struct, because this SD doesn't have any metrics.
+type NoopDiscovererMetrics struct{}
+
+var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (*NoopDiscovererMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (*NoopDiscovererMetrics) Unregister() {
+}
@@ -39,24 +39,47 @@ type Discoverer interface {
 	Run(ctx context.Context, up chan<- []*targetgroup.Group)
 }

+// Internal metrics of service discovery mechanisms.
+type DiscovererMetrics interface {
+	Register() error
+	Unregister()
+}
+
 // DiscovererOptions provides options for a Discoverer.
 type DiscovererOptions struct {
 	Logger log.Logger

-	// A registerer for the Discoverer's metrics.
-	// Some Discoverers may ignore this registerer and use the global one instead.
-	// For now this will work, because the Prometheus `main` function uses the global registry.
-	// However, in the future the Prometheus `main` function will be updated to not use the global registry.
-	// Hence, if a discoverer wants its metrics to be visible via the Prometheus executable's
-	// `/metrics` endpoint, it should use this explicit registerer.
-	// TODO(ptodev): Update this comment once the Prometheus `main` function does not use the global registry.
-	Registerer prometheus.Registerer
+	Metrics DiscovererMetrics

 	// Extra HTTP client options to expose to Discoverers. This field may be
 	// ignored; Discoverer implementations must opt-in to reading it.
 	HTTPClientOptions []config.HTTPClientOption
 }

+// Metrics used by the "refresh" package.
+// We define them here in the "discovery" package in order to avoid a cyclic dependency between
+// "discovery" and "refresh".
+type RefreshMetrics struct {
+	Failures prometheus.Counter
+	Duration prometheus.Observer
+}
+
+// Instantiate the metrics used by the "refresh" package.
+type RefreshMetricsInstantiator interface {
+	Instantiate(mech string) *RefreshMetrics
+}
+
+// An interface for registering, unregistering, and instantiating metrics for the "refresh" package.
+// Refresh metrics are registered and unregistered outside of the service discovery mechanism.
+// This is so that the same metrics can be reused across different service discovery mechanisms.
+// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are
+// specific to that SD. However, doing so would also expose too many unused metrics on
+// the Prometheus /metrics endpoint.
+type RefreshMetricsManager interface {
+	DiscovererMetrics
+	RefreshMetricsInstantiator
+}
+
 // A Config provides the configuration and constructor for a Discoverer.
 type Config interface {
 	// Name returns the name of the discovery mechanism.
@@ -65,6 +88,9 @@ type Config interface {
 	// NewDiscoverer returns a Discoverer for the Config
 	// with the given DiscovererOptions.
 	NewDiscoverer(DiscovererOptions) (Discoverer, error)
+
+	// NewDiscovererMetrics returns the metrics used by the service discovery.
+	NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics
 }

 // Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling
@@ -119,6 +145,11 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return staticDiscoverer(c), nil
 }

+// No metrics are needed for this service discovery mechanism.
+func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 type staticDiscoverer []*targetgroup.Group

 func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
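Taken together, the interface changes above invert the old flow: the caller now builds and registers metrics before constructing the discoverer, then hands them over through DiscovererOptions.Metrics. A self-contained sketch of the wiring, following the same order the tests in this diff use (NewRefreshMetrics, then Config.NewDiscovererMetrics, Register, and only then NewDiscoverer); the DNS config used here is just a placeholder:

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/dns"
)

func main() {
	reg := prometheus.NewRegistry()

	// 1. Shared refresh metrics, registered once and reused across SDs.
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	if err := refreshMetrics.Register(); err != nil {
		panic(err)
	}
	defer refreshMetrics.Unregister()

	// 2. Per-SD metrics created from the Config, then registered.
	cfg := dns.DefaultSDConfig // placeholder config for illustration
	cfg.Names = []string{"example.com"}
	var conf discovery.Config = &cfg
	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
	if err := metrics.Register(); err != nil {
		panic(err)
	}
	defer metrics.Unregister()

	// 3. The discoverer receives the metrics through DiscovererOptions.
	d, err := conf.NewDiscoverer(discovery.DiscovererOptions{
		Logger:  log.NewNopLogger(),
		Metrics: metrics,
	})
	if err != nil {
		panic(err)
	}
	_ = d
}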
@@ -67,12 +67,17 @@ type SDConfig struct {
 	Port            int            `yaml:"port"` // Ignored for SRV records
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "dns" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger, opts.Registerer)
+	return NewDiscovery(*c, opts.Logger, opts.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -102,18 +107,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // the Discoverer interface.
 type Discovery struct {
 	*refresh.Discovery
-	names                    []string
-	port                     int
-	qtype                    uint16
-	logger                   log.Logger
-	dnsSDLookupsCount        prometheus.Counter
-	dnsSDLookupFailuresCount prometheus.Counter
+	names   []string
+	port    int
+	qtype   uint16
+	logger  log.Logger
+	metrics *dnsMetrics

 	lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*dnsMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -137,28 +146,16 @@ func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (
 		port:     conf.Port,
 		logger:   logger,
 		lookupFn: lookupWithSearchPath,
-		dnsSDLookupsCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Namespace: namespace,
-				Name:      "sd_dns_lookups_total",
-				Help:      "The number of DNS-SD lookups.",
-			}),
-		dnsSDLookupFailuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Namespace: namespace,
-				Name:      "sd_dns_lookup_failures_total",
-				Help:      "The number of DNS-SD lookup failures.",
-			}),
+		metrics:  m,
 	}

 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "dns",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: prometheus.NewRegistry(),
-			Metrics:  []prometheus.Collector{d.dnsSDLookupsCount, d.dnsSDLookupFailuresCount},
+			Logger:              logger,
+			Mech:                "dns",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
@@ -195,9 +192,9 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

 func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error {
 	response, err := d.lookupFn(name, d.qtype, d.logger)
-	d.dnsSDLookupsCount.Inc()
+	d.metrics.dnsSDLookupsCount.Inc()
 	if err != nil {
-		d.dnsSDLookupFailuresCount.Inc()
+		d.metrics.dnsSDLookupFailuresCount.Inc()
 		return err
 	}
@@ -28,6 +28,7 @@ import (
 	"go.uber.org/goleak"
 	"gopkg.in/yaml.v2"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -253,13 +254,21 @@ func TestDNS(t *testing.T) {
 		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
 			t.Parallel()
-			sd, err := NewDiscovery(tc.config, nil, prometheus.NewRegistry())
+
+			reg := prometheus.NewRegistry()
+			refreshMetrics := discovery.NewRefreshMetrics(reg)
+			metrics := tc.config.NewDiscovererMetrics(reg, refreshMetrics)
+			require.NoError(t, metrics.Register())
+
+			sd, err := NewDiscovery(tc.config, nil, metrics)
 			require.NoError(t, err)
 			sd.lookupFn = tc.lookup

 			tgs, err := sd.refresh(context.Background())
 			require.NoError(t, err)
 			require.Equal(t, tc.expected, tgs)
+
+			metrics.Unregister()
 		})
 	}
 }
discovery/dns/metrics.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dns
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*dnsMetrics)(nil)
+
+type dnsMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+
+	dnsSDLookupsCount        prometheus.Counter
+	dnsSDLookupFailuresCount prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &dnsMetrics{
+		refreshMetrics: rmi,
+		dnsSDLookupsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_dns_lookups_total",
+				Help:      "The number of DNS-SD lookups.",
+			}),
+		dnsSDLookupFailuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "sd_dns_lookup_failures_total",
+				Help:      "The number of DNS-SD lookup failures.",
+			}),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.dnsSDLookupsCount,
+		m.dnsSDLookupFailuresCount,
+	})
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *dnsMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *dnsMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}
@@ -16,6 +16,7 @@ package eureka
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
 	"net/url"
@@ -76,12 +77,19 @@ type SDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &eurekaMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "eureka" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -118,7 +126,12 @@ type Discovery struct {
 }

 // NewDiscovery creates a new Eureka discovery for the given role.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*eurekaMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
 	if err != nil {
 		return nil, err
@@ -130,11 +143,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 	}
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "eureka",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "eureka",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -36,7 +37,17 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
 		Server: ts.URL,
 	}

-	md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
+	err := metrics.Register()
+	if err != nil {
+		return nil, err
+	}
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	md, err := NewDiscovery(&conf, nil, metrics)
 	if err != nil {
 		return nil, err
 	}
discovery/eureka/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eureka
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*eurekaMetrics)(nil)
+
+type eurekaMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *eurekaMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *eurekaMetrics) Unregister() {}
@@ -57,12 +57,17 @@ type SDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "file" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -94,6 +99,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 const fileSDFilepathLabel = model.MetaLabelPrefix + "filepath"

 // TimestampCollector is a Custom Collector for Timestamps of the files.
+// TODO(ptodev): Now that each file SD has its own TimestampCollector
+// inside discovery/file/metrics.go, we can refactor this collector
+// (or get rid of it) as each TimestampCollector instance will only use one discoverer.
 type TimestampCollector struct {
 	Description *prometheus.Desc
 	discoverers map[*Discovery]struct{}
@@ -169,16 +177,16 @@ type Discovery struct {
 	lastRefresh map[string]int
 	logger      log.Logger

-	fileSDReadErrorsCount  prometheus.Counter
-	fileSDScanDuration     prometheus.Summary
-	fileWatcherErrorsCount prometheus.Counter
-	fileSDTimeStamp        *TimestampCollector
-
-	metricRegisterer discovery.MetricRegisterer
+	metrics *fileMetrics
 }

 // NewDiscovery returns a new file discovery for the given paths.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	fm, ok := metrics.(*fileMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -188,33 +196,10 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 		interval:   time.Duration(conf.RefreshInterval),
 		timestamps: make(map[string]float64),
 		logger:     logger,
-		fileSDReadErrorsCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_file_read_errors_total",
-				Help: "The number of File-SD read errors.",
-			}),
-		fileSDScanDuration: prometheus.NewSummary(
-			prometheus.SummaryOpts{
-				Name:       "prometheus_sd_file_scan_duration_seconds",
-				Help:       "The duration of the File-SD scan in seconds.",
-				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-			}),
-		fileWatcherErrorsCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_file_watcher_errors_total",
-				Help: "The number of File-SD errors caused by filesystem watch failures.",
-			}),
-		fileSDTimeStamp: NewTimestampCollector(),
+		metrics:    fm,
 	}

-	disc.fileSDTimeStamp.addDiscoverer(disc)
-
-	disc.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
-		disc.fileSDReadErrorsCount,
-		disc.fileSDScanDuration,
-		disc.fileWatcherErrorsCount,
-		disc.fileSDTimeStamp,
-	})
+	fm.init(disc)

 	return disc, nil
 }
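The body of fm.init is not part of this hunk. Presumably it re-homes the discoverer registration that the deleted disc.fileSDTimeStamp.addDiscoverer(disc) line used to do inline; a sketch under that assumption only:

// Assumed shape of fileMetrics.init (not shown in this diff): register the
// discoverer with the timestamp collector owned by the metrics struct.
func (fm *fileMetrics) init(disc *Discovery) {
	fm.fileSDTimeStamp.addDiscoverer(disc)
}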
@@ -253,17 +238,10 @@ func (d *Discovery) watchFiles() {

 // Run implements the Discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-	err := d.metricRegisterer.RegisterMetrics()
-	if err != nil {
-		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-		return
-	}
-	defer d.metricRegisterer.UnregisterMetrics()
-
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
 		level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
-		d.fileWatcherErrorsCount.Inc()
+		d.metrics.fileWatcherErrorsCount.Inc()
 		return
 	}
 	d.watcher = watcher
@@ -327,7 +305,7 @@ func (d *Discovery) stop() {
 	done := make(chan struct{})
 	defer close(done)

-	d.fileSDTimeStamp.removeDiscoverer(d)
+	d.metrics.fileSDTimeStamp.removeDiscoverer(d)

 	// Closing the watcher will deadlock unless all events and errors are drained.
 	go func() {
@@ -353,13 +331,13 @@ func (d *Discovery) stop() {
 func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	t0 := time.Now()
 	defer func() {
-		d.fileSDScanDuration.Observe(time.Since(t0).Seconds())
+		d.metrics.fileSDScanDuration.Observe(time.Since(t0).Seconds())
 	}()
 	ref := map[string]int{}
 	for _, p := range d.listFiles() {
 		tgroups, err := d.readFile(p)
 		if err != nil {
-			d.fileSDReadErrorsCount.Inc()
+			d.metrics.fileSDReadErrorsCount.Inc()

 			level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
 			// Prevent deletion down below.
@@ -29,6 +29,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -144,19 +145,28 @@ func (t *testRunner) run(files ...string) {
 	ctx, cancel := context.WithCancel(context.Background())
 	t.cancelSD = cancel
 	go func() {
+		conf := &SDConfig{
+			Files: files,
+			// Setting a high refresh interval to make sure that the tests only
+			// rely on file watches.
+			RefreshInterval: model.Duration(1 * time.Hour),
+		}
+
+		reg := prometheus.NewRegistry()
+		refreshMetrics := discovery.NewRefreshMetrics(reg)
+		metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
+		require.NoError(t, metrics.Register())
+
 		d, err := NewDiscovery(
-			&SDConfig{
-				Files: files,
-				// Setting a high refresh interval to make sure that the tests only
-				// rely on file watches.
-				RefreshInterval: model.Duration(1 * time.Hour),
-			},
+			conf,
 			nil,
-			prometheus.NewRegistry(),
+			metrics,
 		)
 		require.NoError(t, err)

 		d.Run(ctx, t.ch)
+
+		metrics.Unregister()
 	}()
 }
@ -193,9 +203,10 @@ func (t *testRunner) targets() []*targetgroup.Group {
|
|||
func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group) {
|
||||
t.Helper()
|
||||
|
||||
timeout := time.After(defaultWait)
|
||||
for {
|
||||
select {
|
||||
case <-time.After(defaultWait):
|
||||
case <-timeout:
|
||||
t.Fatalf("Expected update but got none")
|
||||
return
|
||||
case <-time.After(defaultWait / 10):
|
||||
|
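The reworked test above doubles as documentation for the metrics lifecycle every SD now follows: build the discoverer's metrics from its config, register them, hand them to the constructor, and unregister them once Run returns. Outside a test, the wiring looks roughly like this; a sketch only, where logger, ctx, ch, and the glob path are assumptions, not part of the patch:

// Sketch of the new caller-side metrics lifecycle (illustrative, not patch code).
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)

conf := &SDConfig{Files: []string{"targets/*.json"}} // hypothetical file-SD config
metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
if err := metrics.Register(); err != nil { // registration moved out of Run()
	panic(err)
}
defer metrics.Unregister()

d, err := NewDiscovery(conf, logger, metrics)
if err != nil {
	panic(err)
}
d.Run(ctx, ch) // Run no longer registers or unregisters metrics itself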
discovery/file/metrics.go (new file, 76 lines)
@@ -0,0 +1,76 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package file
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*fileMetrics)(nil)
+
+type fileMetrics struct {
+	fileSDReadErrorsCount  prometheus.Counter
+	fileSDScanDuration     prometheus.Summary
+	fileWatcherErrorsCount prometheus.Counter
+	fileSDTimeStamp        *TimestampCollector
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	fm := &fileMetrics{
+		fileSDReadErrorsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_file_read_errors_total",
+				Help: "The number of File-SD read errors.",
+			}),
+		fileSDScanDuration: prometheus.NewSummary(
+			prometheus.SummaryOpts{
+				Name:       "prometheus_sd_file_scan_duration_seconds",
+				Help:       "The duration of the File-SD scan in seconds.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			}),
+		fileWatcherErrorsCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_file_watcher_errors_total",
+				Help: "The number of File-SD errors caused by filesystem watch failures.",
+			}),
+		fileSDTimeStamp: NewTimestampCollector(),
+	}
+
+	fm.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		fm.fileSDReadErrorsCount,
+		fm.fileSDScanDuration,
+		fm.fileWatcherErrorsCount,
+		fm.fileSDTimeStamp,
+	})
+
+	return fm
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (fm *fileMetrics) Register() error {
+	return fm.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (fm *fileMetrics) Unregister() {
+	fm.metricRegisterer.UnregisterMetrics()
+}
+
+func (fm *fileMetrics) init(disc *Discovery) {
+	fm.fileSDTimeStamp.addDiscoverer(disc)
+}
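Because these collectors now live on a struct returned through discovery.DiscovererMetrics rather than on the Discovery itself, tests can reach them directly. A hedged sketch using client_golang's testutil package (it assumes it runs inside package file, since fileMetrics is unexported):

// Sketch: asserting on file-SD counters via the new metrics struct.
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
fm := newDiscovererMetrics(reg, refreshMetrics).(*fileMetrics)
require.NoError(t, fm.Register())
defer fm.Unregister()

fm.fileSDReadErrorsCount.Inc()
// testutil.ToFloat64 reads the current value of a single-series collector.
require.Equal(t, 1.0, testutil.ToFloat64(fm.fileSDReadErrorsCount))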
@@ -82,12 +82,19 @@ type SDConfig struct {
 	TagSeparator string `yaml:"tag_separator,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &gceMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "gce" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger, opts.Registerer)
+	return NewDiscovery(*c, opts.Logger, opts.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.

@@ -122,7 +129,12 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*gceMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		project: conf.Project,
 		zone:    conf.Zone,

@@ -143,11 +155,11 @@ func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "gce",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "gce",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
discovery/gce/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gce
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*gceMetrics)(nil)
+
+type gceMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *gceMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *gceMetrics) Unregister() {}
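gce, hetzner, and ionos all share this no-op shape: their only metrics are the common prometheus_sd_refresh_* family, which refresh.NewDiscovery instantiates through the RefreshMetricsInstantiator, so Register and Unregister have nothing SD-specific to do. A condensed template for adding another refresh-based SD under this scheme; the package name examplesd, the type names, and the trivial refresh function are placeholders, not part of the patch:

// Hypothetical template for a refresh-based SD with no custom collectors.
package examplesd

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

type SDConfig struct {
	RefreshInterval time.Duration
}

type exampleMetrics struct {
	refreshMetrics discovery.RefreshMetricsInstantiator
}

func (m *exampleMetrics) Register() error { return nil } // only shared refresh metrics
func (m *exampleMetrics) Unregister()     {}

// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &exampleMetrics{refreshMetrics: rmi}
}

func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
	m := metrics.(*exampleMetrics) // mirror the type assertion the real SDs do

	refreshF := func(ctx context.Context) ([]*targetgroup.Group, error) {
		return nil, nil // a real SD would list its backend here
	}
	return refresh.NewDiscovery(refresh.Options{
		Logger:              logger,
		Mech:                "examplesd",
		Interval:            conf.RefreshInterval,
		RefreshF:            refreshF,
		MetricsInstantiator: m.refreshMetrics,
	}), nil
}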
@@ -63,12 +63,19 @@ type SDConfig struct {
 	robotEndpoint string // For tests only.
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &hetznerMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "hetzner" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 type refresher interface {

@@ -128,7 +135,12 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*hetznerMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	r, err := newRefresher(conf, logger)
 	if err != nil {
 		return nil, err

@@ -136,11 +148,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 	return refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "hetzner",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: r.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "hetzner",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            r.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	), nil
 }
discovery/hetzner/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hetzner
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*hetznerMetrics)(nil)
+
+type hetznerMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *hetznerMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *hetznerMetrics) Unregister() {}
@@ -58,12 +58,17 @@ type SDConfig struct {
 	URL string `yaml:"url"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "http" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.

@@ -105,11 +110,16 @@ type Discovery struct {
 	client          *http.Client
 	refreshInterval time.Duration
 	tgLastLength    int
-	failuresCount   prometheus.Counter
+	metrics         *httpMetrics
 }

 // NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*httpMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}

@@ -124,21 +134,16 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli
 		url:             conf.URL,
 		client:          client,
 		refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers.
-		failuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_http_failures_total",
-				Help: "Number of HTTP service discovery refresh failures.",
-			}),
+		metrics:         m,
 	}

 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "http",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.Refresh,
-			Registry: reg,
-			Metrics:  []prometheus.Collector{d.failuresCount},
+			Logger:              logger,
+			Mech:                "http",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.Refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil

@@ -155,7 +160,7 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	resp, err := d.client.Do(req.WithContext(ctx))
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}
 	defer func() {

@@ -164,31 +169,31 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	}()

 	if resp.StatusCode != http.StatusOK {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
 	}

 	if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type"))
 	}

 	b, err := io.ReadAll(resp.Body)
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}

 	var targetGroups []*targetgroup.Group

 	if err := json.Unmarshal(b, &targetGroups); err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}

 	for i, tg := range targetGroups {
 		if tg == nil {
-			d.failuresCount.Inc()
+			d.metrics.failuresCount.Inc()
 			err = errors.New("nil target group item found")
 			return nil, err
 		}
@@ -28,6 +28,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )

@@ -41,7 +42,14 @@ func TestHTTPValidRefresh(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	defer refreshMetrics.Unregister()
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)

 	ctx := context.Background()

@@ -63,7 +71,7 @@ func TestHTTPValidRefresh(t *testing.T) {
 		},
 	}
 	require.Equal(t, expectedTargets, tgs)
-	require.Equal(t, 0.0, getFailureCount(d.failuresCount))
+	require.Equal(t, 0.0, getFailureCount(d.metrics.failuresCount))
 }

 func TestHTTPInvalidCode(t *testing.T) {

@@ -79,13 +87,20 @@ func TestHTTPInvalidCode(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	defer refreshMetrics.Unregister()
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
 	_, err = d.Refresh(ctx)
 	require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
-	require.Equal(t, 1.0, getFailureCount(d.failuresCount))
+	require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount))
 }

 func TestHTTPInvalidFormat(t *testing.T) {

@@ -101,13 +116,20 @@ func TestHTTPInvalidFormat(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	defer refreshMetrics.Unregister()
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
 	_, err = d.Refresh(ctx)
 	require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`)
-	require.Equal(t, 1.0, getFailureCount(d.failuresCount))
+	require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount))
 }

 func getFailureCount(failuresCount prometheus.Counter) float64 {

@@ -412,7 +434,15 @@ func TestSourceDisappeared(t *testing.T) {
 		URL:             ts.URL,
 		RefreshInterval: model.Duration(1 * time.Second),
 	}
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	defer refreshMetrics.Unregister()
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 	for _, test := range cases {
 		ctx := context.Background()
discovery/http/metrics.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package http
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*httpMetrics)(nil)
+
+type httpMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+
+	failuresCount prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &httpMetrics{
+		refreshMetrics: rmi,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_http_failures_total",
+				Help: "Number of HTTP service discovery refresh failures.",
+			}),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.failuresCount,
+	})
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *httpMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *httpMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}
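httpMetrics is the one metrics type in this batch that carries both kinds of state: an SD-specific counter registered through the MetricRegisterer, plus the shared refresh-metrics instantiator. Since everything is attached to the caller's Registerer, one way to sanity-check the result is to gather that registry after Register(); a sketch only, with panics standing in for real error handling:

// Sketch: both metric families land on the same caller-provided registry.
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := (&SDConfig{}).NewDiscovererMetrics(reg, refreshMetrics)
if err := metrics.Register(); err != nil {
	panic(err)
}

mfs, err := reg.Gather() // includes prometheus_sd_http_failures_total once registered
if err != nil {
	panic(err)
}
for _, mf := range mfs {
	fmt.Println(mf.GetName())
}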
@@ -15,16 +15,16 @@ package ionos
 import (
 	"errors"
+	"fmt"
 	"time"

 	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
-
-	"github.com/prometheus/client_golang/prometheus"
 )

 const (

@@ -43,7 +43,12 @@ func init() {
 type Discovery struct{}

 // NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*ionosMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if conf.ionosEndpoint == "" {
 		conf.ionosEndpoint = "https://api.ionos.com"
 	}

@@ -55,11 +60,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 	return refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "ionos",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "ionos",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	), nil
 }

@@ -84,6 +89,13 @@ type SDConfig struct {
 	ionosEndpoint string // For tests only.
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &ionosMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the IONOS Cloud service discovery.
 func (c SDConfig) Name() string {
 	return "ionos"

@@ -91,7 +103,7 @@ func (c SDConfig) Name() string {
 // NewDiscoverer returns a new discovery.Discoverer for IONOS Cloud.
 func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(&c, options.Logger, options.Registerer)
+	return NewDiscovery(&c, options.Logger, options.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
discovery/ionos/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ionos
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*ionosMetrics)(nil)
+
+type ionosMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *ionosMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *ionosMetrics) Unregister() {}
@@ -62,6 +62,8 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 	svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate)
 	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)

+	podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate)
+
 	e := &Endpoints{
 		logger:       l,
 		endpointsInf: eps,

@@ -131,6 +133,29 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 	if err != nil {
 		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
 	}
+	_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+		UpdateFunc: func(old, cur interface{}) {
+			podUpdateCount.Inc()
+			oldPod, ok := old.(*apiv1.Pod)
+			if !ok {
+				return
+			}
+
+			curPod, ok := cur.(*apiv1.Pod)
+			if !ok {
+				return
+			}
+
+			// the Pod's phase may change without triggering an update on the Endpoints/Service.
+			// https://github.com/prometheus/prometheus/issues/11305.
+			if curPod.Status.Phase != oldPod.Status.Phase {
+				e.enqueuePod(namespacedName(curPod.Namespace, curPod.Name))
+			}
+		},
+	})
+	if err != nil {
+		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+	}
 	if e.withNodeMetadata {
 		_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 			AddFunc: func(o interface{}) {

@@ -166,6 +191,18 @@ func (e *Endpoints) enqueueNode(nodeName string) {
 	}
 }

+func (e *Endpoints) enqueuePod(podNamespacedName string) {
+	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
+	if err != nil {
+		level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
+		return
+	}
+
+	for _, endpoint := range endpoints {
+		e.enqueue(endpoint)
+	}
+}
+
 func (e *Endpoints) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {

@@ -312,7 +349,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 			tg.Targets = append(tg.Targets, target)
 			return
 		}
-		s := pod.Namespace + "/" + pod.Name
+		s := namespacedName(pod.Namespace, pod.Name)

 		sp, ok := seenPods[s]
 		if !ok {

@@ -369,6 +406,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 	// For all seen pods, check all container ports. If they were not covered
 	// by one of the service endpoints, generate targets for them.
 	for _, pe := range seenPods {
+		// PodIP can be empty when a pod is starting or has been evicted.
+		if len(pe.pod.Status.PodIP) == 0 {
+			continue
+		}
+
 		for _, c := range pe.pod.Spec.Containers {
 			for _, cport := range c.Ports {
 				hasSeenPort := func() bool {

@@ -383,21 +425,18 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 					continue
 				}

-				// PodIP can be empty when a pod is starting or has been evicted.
-				if len(pe.pod.Status.PodIP) != 0 {
-					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
-
-					target := model.LabelSet{
-						model.AddressLabel:            lv(a),
-						podContainerNameLabel:         lv(c.Name),
-						podContainerImageLabel:        lv(c.Image),
-						podContainerPortNameLabel:     lv(cport.Name),
-						podContainerPortNumberLabel:   lv(ports),
-						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
-					}
-					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
-				}
+				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+
+				target := model.LabelSet{
+					model.AddressLabel:            lv(a),
+					podContainerNameLabel:         lv(c.Name),
+					podContainerImageLabel:        lv(c.Image),
+					podContainerPortNameLabel:     lv(cport.Name),
+					podContainerPortNumberLabel:   lv(ports),
+					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+				}
+				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
 		}
 	}
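enqueuePod only works because newEndpointsByNodeInformer (further down, in kubernetes.go) registers a podIndex secondary index that maps each Endpoints object to the "namespace/name" keys of the pods it references. The client-go mechanics, reduced to a standalone sketch; the index name "pod" mirrors podIndex, and lw stands in for a real cache.ListerWatcher:

// Sketch: a client-go secondary index, as podIndex is used above.
indexers := map[string]cache.IndexFunc{
	"pod": func(obj interface{}) ([]string, error) {
		ep, ok := obj.(*apiv1.Endpoints)
		if !ok {
			return nil, fmt.Errorf("object is not endpoints")
		}
		var keys []string // one key per referenced pod, "namespace/name"
		for _, subset := range ep.Subsets {
			for _, addr := range subset.Addresses {
				if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
					keys = append(keys, addr.TargetRef.Namespace+"/"+addr.TargetRef.Name)
				}
			}
		}
		return keys, nil
	},
}
inf := cache.NewSharedIndexInformer(lw, &apiv1.Endpoints{}, 0, indexers)
// Later, reverse-lookup every Endpoints object referencing a given pod:
objs, err := inf.GetIndexer().ByIndex("pod", "default/testpod")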
@@ -969,3 +969,123 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
 		expectedRes: map[string]*targetgroup.Group{},
 	}.Run(t)
 }
+
+// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
+// See https://github.com/prometheus/prometheus/issues/11305 for more details.
+func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: "default",
+			UID:       types.UID("deadbeef"),
+		},
+		Spec: v1.PodSpec{
+			NodeName: "testnode",
+			Containers: []v1.Container{
+				{
+					Name:  "c1",
+					Image: "c1:latest",
+					Ports: []v1.ContainerPort{
+						{
+							Name:          "mainport",
+							ContainerPort: 9000,
+							Protocol:      v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		Status: v1.PodStatus{
+			// Pod is in Pending phase when discovered for first time.
+			Phase: "Pending",
+			Conditions: []v1.PodCondition{
+				{
+					Type:   v1.PodReady,
+					Status: v1.ConditionFalse,
+				},
+			},
+			HostIP: "2.3.4.5",
+			PodIP:  "4.3.2.1",
+		},
+	}
+	objs := []runtime.Object{
+		&v1.Endpoints{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "testendpoints",
+				Namespace: "default",
+			},
+			Subsets: []v1.EndpointSubset{
+				{
+					Addresses: []v1.EndpointAddress{
+						{
+							IP: "4.3.2.1",
+							// The Pending Pod may be included because the Endpoints was created manually.
+							// Or because the corresponding service has ".spec.publishNotReadyAddresses: true".
+							TargetRef: &v1.ObjectReference{
+								Kind:      "Pod",
+								Name:      "testpod",
+								Namespace: "default",
+							},
+						},
+					},
+					Ports: []v1.EndpointPort{
+						{
+							Name:     "mainport",
+							Port:     9000,
+							Protocol: v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		pod,
+	}
+	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...)
+
+	k8sDiscoveryTest{
+		discovery: n,
+		afterStart: func() {
+			// the Pod becomes Ready.
+			pod.Status.Phase = "Running"
+			pod.Status.Conditions = []v1.PodCondition{
+				{
+					Type:   v1.PodReady,
+					Status: v1.ConditionTrue,
+				},
+			}
+			c.CoreV1().Pods(pod.Namespace).Update(context.Background(), pod, metav1.UpdateOptions{})
+		},
+		expectedMaxItems: 2,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpoints/default/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "4.3.2.1:9000",
+						"__meta_kubernetes_endpoint_port_name":          "mainport",
+						"__meta_kubernetes_endpoint_port_protocol":      "TCP",
+						"__meta_kubernetes_endpoint_ready":              "true",
+						"__meta_kubernetes_endpoint_address_target_kind": "Pod",
+						"__meta_kubernetes_endpoint_address_target_name": "testpod",
+						"__meta_kubernetes_pod_name":                    "testpod",
+						"__meta_kubernetes_pod_ip":                      "4.3.2.1",
+						"__meta_kubernetes_pod_ready":                   "true",
+						"__meta_kubernetes_pod_phase":                   "Running",
+						"__meta_kubernetes_pod_node_name":               "testnode",
+						"__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
+						"__meta_kubernetes_pod_container_name":          "c1",
+						"__meta_kubernetes_pod_container_image":         "c1:latest",
+						"__meta_kubernetes_pod_container_port_name":     "mainport",
+						"__meta_kubernetes_pod_container_port_number":   "9000",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_uid":                     "deadbeef",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_namespace":      "default",
+					"__meta_kubernetes_endpoints_name": "testendpoints",
+				},
+				Source: "endpoints/default/testendpoints",
+			},
+		},
+	}.Run(t)
+}
@@ -358,7 +358,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 			tg.Targets = append(tg.Targets, target)
 			return
 		}
-		s := pod.Namespace + "/" + pod.Name
+		s := namespacedName(pod.Namespace, pod.Name)

 		sp, ok := seenPods[s]
 		if !ok {

@@ -405,6 +405,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 	// For all seen pods, check all container ports. If they were not covered
 	// by one of the service endpoints, generate targets for them.
 	for _, pe := range seenPods {
+		// PodIP can be empty when a pod is starting or has been evicted.
+		if len(pe.pod.Status.PodIP) == 0 {
+			continue
+		}
+
 		for _, c := range pe.pod.Spec.Containers {
 			for _, cport := range c.Ports {
 				hasSeenPort := func() bool {

@@ -422,21 +427,18 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 					continue
 				}

-				// PodIP can be empty when a pod is starting or has been evicted.
-				if len(pe.pod.Status.PodIP) != 0 {
-					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
-
-					target := model.LabelSet{
-						model.AddressLabel:            lv(a),
-						podContainerNameLabel:         lv(c.Name),
-						podContainerImageLabel:        lv(c.Image),
-						podContainerPortNameLabel:     lv(cport.Name),
-						podContainerPortNumberLabel:   lv(ports),
-						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
-					}
-					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
-				}
+				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+
+				target := model.LabelSet{
+					model.AddressLabel:            lv(a),
+					podContainerNameLabel:         lv(c.Name),
+					podContainerImageLabel:        lv(c.Image),
+					podContainerPortNameLabel:     lv(cport.Name),
+					podContainerPortNumberLabel:   lv(ports),
+					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+				}
+				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
 		}
 	}
@@ -123,12 +123,17 @@ type SDConfig struct {
 	AttachMetadata AttachMetadataConfig `yaml:"attach_metadata,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "kubernetes" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return New(opts.Logger, opts.Registerer, c)
+	return New(opts.Logger, opts.Metrics, c)
 }

 // SetDirectory joins any relative file paths with dir.

@@ -265,8 +270,7 @@ type Discovery struct {
 	selectors        roleSelector
 	ownNamespace     string
 	attachMetadata   AttachMetadataConfig
-	eventCount       *prometheus.CounterVec
-	metricRegisterer discovery.MetricRegisterer
+	metrics          *kubernetesMetrics
 }

 func (d *Discovery) getNamespaces() []string {

@@ -285,7 +289,12 @@ func (d *Discovery) getNamespaces() []string {
 }

 // New creates a new Kubernetes discovery for the given role.
-func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, error) {
+func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
+	m, ok := metrics.(*kubernetesMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if l == nil {
 		l = log.NewNopLogger()
 	}

@@ -348,34 +357,7 @@ func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, e
 		selectors:      mapSelector(conf.Selectors),
 		ownNamespace:   ownNamespace,
 		attachMetadata: conf.AttachMetadata,
-		eventCount: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: discovery.KubernetesMetricsNamespace,
-				Name:      "events_total",
-				Help:      "The number of Kubernetes events handled.",
-			},
-			[]string{"role", "event"},
-		),
-	}
-
-	d.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.eventCount})
-
-	// Initialize metric vectors.
-	for _, role := range []string{
-		RoleEndpointSlice.String(),
-		RoleEndpoint.String(),
-		RoleNode.String(),
-		RolePod.String(),
-		RoleService.String(),
-		RoleIngress.String(),
-	} {
-		for _, evt := range []string{
-			MetricLabelRoleAdd,
-			MetricLabelRoleDelete,
-			MetricLabelRoleUpdate,
-		} {
-			d.eventCount.WithLabelValues(role, evt)
-		}
-	}
+		metrics:        m,
+	}

 	return d, nil

@@ -415,13 +397,6 @@ const resyncDisabled = 0
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	d.Lock()

-	err := d.metricRegisterer.RegisterMetrics()
-	if err != nil {
-		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-		return
-	}
-	defer d.metricRegisterer.UnregisterMetrics()
-
 	namespaces := d.getNamespaces()

 	switch d.role {

@@ -513,7 +488,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointSliceInf.Run(ctx.Done())

@@ -573,7 +548,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointsInf.Run(ctx.Done())

@@ -605,7 +580,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				log.With(d.logger, "role", "pod"),
 				d.newPodsByNodeInformer(plw),
 				nodeInformer,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, pod)
 			go pod.podInf.Run(ctx.Done())

@@ -628,7 +603,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			svc := NewService(
 				log.With(d.logger, "role", "service"),
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, svc)
 			go svc.informer.Run(ctx.Done())

@@ -686,14 +661,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			ingress := NewIngress(
 				log.With(d.logger, "role", "ingress"),
 				informer,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, ingress)
 			go ingress.informer.Run(ctx.Done())
 		}
 	case RoleNode:
 		nodeInformer := d.newNodeInformer(ctx)
-		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.eventCount)
+		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount)
 		d.discoverers = append(d.discoverers, node)
 		go node.informer.Run(ctx.Done())
 	default:

@@ -792,6 +767,21 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
 	indexers := make(map[string]cache.IndexFunc)
+	indexers[podIndex] = func(obj interface{}) ([]string, error) {
+		e, ok := obj.(*apiv1.Endpoints)
+		if !ok {
+			return nil, fmt.Errorf("object is not endpoints")
+		}
+		var pods []string
+		for _, target := range e.Subsets {
+			for _, addr := range target.Addresses {
+				if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
+					pods = append(pods, namespacedName(addr.TargetRef.Namespace, addr.TargetRef.Name))
+				}
+			}
+		}
+		return pods, nil
+	}
 	if !d.attachMetadata.Node {
 		return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 	}

@@ -897,3 +887,7 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta,
 		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue
 	}
 }
+
+func namespacedName(namespace, name string) string {
+	return namespace + "/" + name
+}
@@ -51,24 +51,29 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
 	fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
 	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}

+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := newDiscovererMetrics(reg, refreshMetrics)
+	err := metrics.Register()
+	if err != nil {
+		panic(err)
+	}
+	// TODO(ptodev): Unregister the metrics at the end of the test.
+
+	kubeMetrics, ok := metrics.(*kubernetesMetrics)
+	if !ok {
+		panic("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		client:             clientset,
 		logger:             log.NewNopLogger(),
 		role:               role,
 		namespaceDiscovery: &nsDiscovery,
 		ownNamespace:       "own-ns",
-		eventCount: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: discovery.KubernetesMetricsNamespace,
-				Name:      "events_total",
-				Help:      "The number of Kubernetes events handled.",
-			},
-			[]string{"role", "event"},
-		),
+		metrics:            kubeMetrics,
 	}

-	d.metricRegisterer = discovery.NewMetricRegisterer(prometheus.NewRegistry(), []prometheus.Collector{d.eventCount})
-
 	return d, clientset
 }
discovery/kubernetes/metrics.go (new file, 75 lines)
@@ -0,0 +1,75 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kubernetes
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*kubernetesMetrics)(nil)
+
+type kubernetesMetrics struct {
+	eventCount *prometheus.CounterVec
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &kubernetesMetrics{
+		eventCount: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: discovery.KubernetesMetricsNamespace,
+				Name:      "events_total",
+				Help:      "The number of Kubernetes events handled.",
+			},
+			[]string{"role", "event"},
+		),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.eventCount,
+	})
+
+	// Initialize metric vectors.
+	for _, role := range []string{
+		RoleEndpointSlice.String(),
+		RoleEndpoint.String(),
+		RoleNode.String(),
+		RolePod.String(),
+		RoleService.String(),
+		RoleIngress.String(),
+	} {
+		for _, evt := range []string{
+			MetricLabelRoleAdd,
+			MetricLabelRoleDelete,
+			MetricLabelRoleUpdate,
+		} {
+			m.eventCount.WithLabelValues(role, evt)
+		}
+	}
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *kubernetesMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *kubernetesMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}
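The role/event loop at the end of newDiscovererMetrics pre-creates every label combination on eventCount. This matters because a CounterVec only exports a child series after the first WithLabelValues call for that label pair; without pre-initialization, dashboards would see missing series instead of zeros until an event of that kind happens. Illustrated in isolation (a standalone sketch, not patch code):

// Sketch: pre-initializing CounterVec children so all series export as 0.
evt := prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "example_events_total", Help: "Example."},
	[]string{"role", "event"},
)
prometheus.MustRegister(evt)

for _, role := range []string{"pod", "service"} {
	for _, event := range []string{"add", "delete", "update"} {
		evt.WithLabelValues(role, event) // creates the child at value 0
	}
}
// example_events_total{role="pod",event="add"} 0  ... is now scrapeable.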
@@ -33,7 +33,10 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )

-const nodeIndex = "node"
+const (
+	nodeIndex = "node"
+	podIndex  = "pod"
+)

 // Pod discovers new pod targets.
 type Pod struct {

@@ -326,7 +329,7 @@ func podSource(pod *apiv1.Pod) string {
 }

 func podSourceFromNamespaceAndName(namespace, name string) string {
-	return "pod/" + namespace + "/" + name
+	return "pod/" + namespacedName(namespace, name)
 }

 func podReady(pod *apiv1.Pod) model.LabelValue {
@@ -42,7 +42,7 @@ type provider struct {
 }

 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}

@@ -55,6 +55,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 		updatert:    5 * time.Second,
 		triggerSend: make(chan struct{}, 1),
 		registerer:  registerer,
+		sdMetrics:   sdMetrics,
 	}
 	for _, option := range options {
 		option(mgr)

@@ -62,7 +63,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 	// Register the metrics.
 	// We have to do this after setting all options, so that the name of the Manager is set.
-	if metrics, err := discovery.NewMetrics(registerer, mgr.name); err == nil {
+	if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil {
 		mgr.metrics = metrics
 	} else {
 		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)

@@ -108,7 +109,8 @@ type Manager struct {
 	// A registerer for all service discovery metrics.
 	registerer prometheus.Registerer

-	metrics *discovery.Metrics
+	metrics   *discovery.Metrics
+	sdMetrics map[string]discovery.DiscovererMetrics
 }

 // Run starts the background processing.

@@ -283,8 +285,8 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
-			Logger:     log.With(m.logger, "discovery", typ, "config", setName),
-			Registerer: m.registerer,
+			Logger:  log.With(m.logger, "discovery", typ, "config", setName),
+			Metrics: m.sdMetrics[typ],
 		})
 		if err != nil {
 			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
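NewManager now expects the per-SD metrics to be created and registered ahead of time, keyed by SD name. The helper the tests below lean on, discovery.RegisterSDMetrics, does exactly that for every registered Config; production wiring looks roughly like this (a sketch, with panics standing in for real error handling):

// Sketch: constructing a discovery manager under the new signature.
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics)
if err != nil {
	panic(err)
}

mgr := NewManager(ctx, logger, reg, sdMetrics)
go mgr.Run()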
@@ -36,6 +36,13 @@ func TestMain(m *testing.M) {
 	testutil.TolerantVerifyLeak(m)
 }

+func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) {
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics)
+	require.NoError(t, err)
+	return &refreshMetrics, sdMetrics
+}
+
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter

@@ -665,7 +672,10 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond

@@ -748,7 +758,11 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -777,7 +791,11 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
 func TestDiscovererConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -802,7 +820,11 @@ func TestDiscovererConfigs(t *testing.T) {
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -842,7 +864,11 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, nil, reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -874,7 +900,11 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -897,10 +927,19 @@ func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Disco
 	return nil, e.err
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &discovery.NoopDiscovererMetrics{}
+}
+
 func TestGaugeFailedConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()

@@ -1057,7 +1096,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

-	mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := newTestMetrics(t, reg)
+
+	mgr := NewManager(ctx, nil, reg, sdMetrics)
 	require.NotNil(t, mgr)
 	mgr.updatert = updateDelay
 	go mgr.Run()
@@ -87,12 +87,17 @@ type SDConfig struct {
 	TagSeparator string `yaml:"tag_separator,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "linode" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.

@@ -122,22 +127,23 @@ type Discovery struct {
 	pollCount           int
 	lastResults         []*targetgroup.Group
 	eventPollingEnabled bool
-	failuresCount       prometheus.Counter
+	metrics             *linodeMetrics
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*linodeMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		port:                 conf.Port,
 		tagSeparator:         conf.TagSeparator,
 		pollCount:            0,
 		lastRefreshTimestamp: time.Now().UTC(),
 		eventPollingEnabled:  true,
-		failuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_linode_failures_total",
-				Help: "Number of Linode service discovery refresh failures.",
-			}),
+		metrics:              m,
 	}

 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd")

@@ -156,12 +162,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "linode",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
-			Metrics:  []prometheus.Collector{d.failuresCount},
+			Logger:              logger,
+			Mech:                "linode",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil

@@ -223,14 +228,14 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
 	// Gather all linode instances.
 	instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}

 	// Gather detailed IP address info for all IPs on all linode instances.
 	detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}
@ -24,6 +24,8 @@ import (
|
|||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
)
|
||||
|
||||
type LinodeSDTestSuite struct {
|
||||
|
@ -53,7 +55,15 @@ func TestLinodeSDRefresh(t *testing.T) {
|
|||
Credentials: tokenID,
|
||||
Type: "Bearer",
|
||||
}
|
||||
d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
refreshMetrics := discovery.NewRefreshMetrics(reg)
|
||||
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
|
||||
require.NoError(t, metrics.Register())
|
||||
defer metrics.Unregister()
|
||||
defer refreshMetrics.Unregister()
|
||||
|
||||
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
|
||||
require.NoError(t, err)
|
||||
endpoint, err := url.Parse(sdmock.Mock.Endpoint())
|
||||
require.NoError(t, err)
|
||||
|
|
discovery/linode/metrics.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linode
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*linodeMetrics)(nil)
+
+type linodeMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+
+	failuresCount prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &linodeMetrics{
+		refreshMetrics: rmi,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_linode_failures_total",
+				Help: "Number of Linode service discovery refresh failures.",
+			}),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.failuresCount,
+	})
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *linodeMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *linodeMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}

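Note: every per-SD metrics type introduced in this change satisfies the same small lifecycle contract. A minimal sketch of that contract, inferred from the methods above and from the test usage later in the diff (the real interface is defined elsewhere in the discovery package, so the exact definition here is an assumption, not a quote):

	package discovery

	// Sketch of the lifecycle contract the per-SD metrics types implement.
	type DiscovererMetrics interface {
		Register() error // called once before the discoverer starts
		Unregister()     // called on teardown, as the tests above defer
	}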
@@ -64,8 +64,24 @@ func (p *Provider) Config() interface{} {
 	return p.config
 }
 
+// Registers the metrics needed for SD mechanisms.
+// Does not register the metrics for the Discovery Manager.
+// TODO(ptodev): Add ability to unregister the metrics?
+func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) {
+	// Some SD mechanisms use the "refresh" package, which has its own metrics.
+	refreshSdMetrics := NewRefreshMetrics(reg)
+
+	// Register the metrics specific for each SD mechanism, and the ones for the refresh package.
+	sdMetrics, err := RegisterSDMetrics(reg, refreshSdMetrics)
+	if err != nil {
+		return nil, fmt.Errorf("failed to register service discovery metrics: %w", err)
+	}
+
+	return sdMetrics, nil
+}
+
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -77,6 +93,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 		updatert:    5 * time.Second,
 		triggerSend: make(chan struct{}, 1),
 		registerer:  registerer,
+		sdMetrics:   sdMetrics,
 	}
 	for _, option := range options {
 		option(mgr)
@@ -84,7 +101,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 
 	// Register the metrics.
 	// We have to do this after setting all options, so that the name of the Manager is set.
-	if metrics, err := NewMetrics(registerer, mgr.name); err == nil {
+	if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
 		mgr.metrics = metrics
 	} else {
 		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
@@ -143,7 +160,8 @@ type Manager struct {
 	// A registerer for all service discovery metrics.
 	registerer prometheus.Registerer
 
-	metrics *Metrics
+	metrics   *Metrics
+	sdMetrics map[string]DiscovererMetrics
 }
 
 // Providers returns the currently configured SD providers.
@@ -402,7 +420,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
 			Logger:            log.With(m.logger, "discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
-			Registerer:        m.registerer,
+			Metrics:           m.sdMetrics[typ],
 		})
 		if err != nil {
 			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)

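Note: with these hunks, callers must create and register the SD metrics up front and thread them through the Manager constructor. A minimal wiring sketch under the new signatures shown above (the helper function and its name are illustrative, not part of the diff):

	package main

	import (
		"context"

		"github.com/go-kit/log"
		"github.com/prometheus/client_golang/prometheus"

		"github.com/prometheus/prometheus/discovery"
	)

	// newDiscoveryManager is a hypothetical caller-side helper showing the
	// new construction order: metrics first, then the manager.
	func newDiscoveryManager(ctx context.Context, logger log.Logger, reg prometheus.Registerer) (*discovery.Manager, error) {
		// Register the shared refresh metrics plus every SD mechanism's
		// metrics once, before the manager exists.
		sdMetrics, err := discovery.CreateAndRegisterSDMetrics(reg)
		if err != nil {
			return nil, err
		}

		// NewManager now takes the pre-registered metrics map. The tests in
		// this diff assert the result is non-nil, so callers should too.
		mgr := discovery.NewManager(ctx, logger, reg, sdMetrics)
		return mgr, nil
	}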
@@ -36,6 +36,13 @@ func TestMain(m *testing.M) {
 	testutil.TolerantVerifyLeak(m)
 }
 
+func NewTestMetrics(t *testing.T, reg prometheus.Registerer) (*RefreshMetricsManager, map[string]DiscovererMetrics) {
+	refreshMetrics := NewRefreshMetrics(reg)
+	sdMetrics, err := RegisterSDMetrics(reg, refreshMetrics)
+	require.NoError(t, err)
+	return &refreshMetrics, sdMetrics
+}
+
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
@@ -665,7 +672,10 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 
@@ -780,7 +790,11 @@ func pk(provider, setName string, n int) poolKey {
 func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -813,7 +827,11 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -849,7 +867,11 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -888,7 +910,11 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
 func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -950,7 +976,11 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -990,7 +1020,11 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 func TestDiscovererConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1023,7 +1057,11 @@ func TestDiscovererConfigs(t *testing.T) {
 func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1071,7 +1109,11 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, nil, reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1108,7 +1150,11 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1129,11 +1175,21 @@ type errorConfig struct{ err error }
 func (e errorConfig) Name() string { return "error" }
 func (e errorConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return nil, e.err }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (errorConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 type lockStaticConfig struct {
 	mu     *sync.Mutex
 	config StaticConfig
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (lockStaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 func (s lockStaticConfig) Name() string { return "lockstatic" }
 func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
 	return (lockStaticDiscoverer)(s), nil
@@ -1155,7 +1211,11 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.
 func TestGaugeFailedConfigs(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1312,7 +1372,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
-	mgr := NewManager(ctx, nil, prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	mgr := NewManager(ctx, nil, reg, sdMetrics)
 	require.NotNil(t, mgr)
 	mgr.updatert = updateDelay
 	go mgr.Run()
@@ -1408,7 +1471,11 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
 func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1470,6 +1537,11 @@ func newTestDiscoverer() *testDiscoverer {
 	}
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*testDiscoverer) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+	return &NoopDiscovererMetrics{}
+}
+
 // Name implements Config.
 func (t *testDiscoverer) Name() string {
 	return "test"

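Note: the test doubles above (errorConfig, lockStaticConfig, testDiscoverer) all satisfy the extended Config interface by returning the shared no-op metrics. A condensed sketch of what a minimal Config now needs, modeled on those doubles (exampleConfig is hypothetical):

	package example

	import (
		"github.com/prometheus/client_golang/prometheus"

		"github.com/prometheus/prometheus/discovery"
	)

	// exampleConfig mirrors the test doubles above; it is not part of the diff.
	type exampleConfig struct{}

	func (exampleConfig) Name() string { return "example" }

	func (exampleConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Discoverer, error) {
		return nil, nil // a real config constructs and returns its Discoverer here
	}

	// NewDiscovererMetrics is the newly required Config method; configs with
	// no metrics of their own return the shared no-op implementation.
	func (exampleConfig) NewDiscovererMetrics(prometheus.Registerer, discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
		return &discovery.NoopDiscovererMetrics{}
	}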
@@ -79,12 +79,19 @@ type SDConfig struct {
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &marathonMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "marathon" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger, opts.Registerer)
+	return NewDiscovery(*c, opts.Logger, opts.Metrics)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -133,7 +140,12 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Marathon Discovery.
-func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*marathonMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
 	if err != nil {
 		return nil, err
@@ -156,11 +168,11 @@ func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (
 	}
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "marathon",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "marathon",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil

@@ -23,7 +23,9 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
@@ -37,7 +39,19 @@ func testConfig() SDConfig {
 }
 
 func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) {
-	md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry())
+	cfg := testConfig()
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	err := metrics.Register()
+	if err != nil {
+		return nil, err
+	}
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	md, err := NewDiscovery(cfg, nil, metrics)
 	if err != nil {
 		return nil, err
 	}
@@ -130,7 +144,15 @@ func TestMarathonSDSendGroup(t *testing.T) {
 }
 
 func TestMarathonSDRemoveApp(t *testing.T) {
-	md, err := NewDiscovery(testConfig(), nil, prometheus.NewRegistry())
+	cfg := testConfig()
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	md, err := NewDiscovery(cfg, nil, metrics)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}

discovery/marathon/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package marathon
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*marathonMetrics)(nil)
+
+type marathonMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *marathonMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *marathonMetrics) Unregister() {}

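Note: marathonMetrics is the refresh-only shape of the new pattern. It owns no collectors of its own, so both lifecycle methods are no-ops; the shared refresh vectors it points at are registered centrally. A sketch of that shape under hypothetical names (contrast it with the counter-owning linodeMetrics above):

	package example

	import "github.com/prometheus/prometheus/discovery"

	// exampleRefreshOnlyMetrics mirrors marathonMetrics: just a handle on
	// the shared refresh instantiator, with nothing to (un)register locally.
	type exampleRefreshOnlyMetrics struct {
		refreshMetrics discovery.RefreshMetricsInstantiator
	}

	func (m *exampleRefreshOnlyMetrics) Register() error { return nil }
	func (m *exampleRefreshOnlyMetrics) Unregister()     {}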
@@ -38,7 +38,7 @@ type Metrics struct {
 	SentUpdates prometheus.Counter
 }
 
-func NewMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) {
+func NewManagerMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) {
 	m := &Metrics{}
 
 	m.FailedConfigs = prometheus.NewGauge(

discovery/metrics_refresh.go (new file, 75 lines)
@@ -0,0 +1,75 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Metric vectors for the "refresh" package.
+// We define them here in the "discovery" package in order to avoid a cyclic dependency between
+// "discovery" and "refresh".
+type RefreshMetricsVecs struct {
+	failuresVec *prometheus.CounterVec
+	durationVec *prometheus.SummaryVec
+
+	metricRegisterer MetricRegisterer
+}
+
+var _ RefreshMetricsManager = (*RefreshMetricsVecs)(nil)
+
+func NewRefreshMetrics(reg prometheus.Registerer) RefreshMetricsManager {
+	m := &RefreshMetricsVecs{
+		failuresVec: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_refresh_failures_total",
+				Help: "Number of refresh failures for the given SD mechanism.",
+			},
+			[]string{"mechanism"}),
+		durationVec: prometheus.NewSummaryVec(
+			prometheus.SummaryOpts{
+				Name:       "prometheus_sd_refresh_duration_seconds",
+				Help:       "The duration of a refresh in seconds for the given SD mechanism.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			},
+			[]string{"mechanism"}),
+	}
+
+	// The reason we register metric vectors instead of metrics is so that
+	// the metrics are not visible until they are recorded.
+	m.metricRegisterer = NewMetricRegisterer(reg, []prometheus.Collector{
+		m.failuresVec,
+		m.durationVec,
+	})
+
+	return m
+}
+
+// Instantiate returns metrics out of metric vectors.
+func (m *RefreshMetricsVecs) Instantiate(mech string) *RefreshMetrics {
+	return &RefreshMetrics{
+		Failures: m.failuresVec.WithLabelValues(mech),
+		Duration: m.durationVec.WithLabelValues(mech),
+	}
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *RefreshMetricsVecs) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *RefreshMetricsVecs) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}

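Note: a sketch of how a refresh-based discoverer presumably consumes the instantiator wired in via refresh.Options.MetricsInstantiator: the mechanism label is bound once via Instantiate, then the resulting per-mechanism metrics are recorded on every cycle. The Failures/Duration field names follow the Instantiate method above; the surrounding function is hypothetical, not the refresh package's actual code.

	package main

	import (
		"time"

		"github.com/prometheus/prometheus/discovery"
	)

	// runRefreshOnce binds the mechanism label once, then records the
	// outcome of a single refresh, mirroring what the refresh package is
	// assumed to do with its MetricsInstantiator option.
	func runRefreshOnce(rmi discovery.RefreshMetricsInstantiator, mech string, refreshF func() error) {
		m := rmi.Instantiate(mech) // per-mechanism *discovery.RefreshMetrics

		start := time.Now()
		err := refreshF()
		m.Duration.Observe(time.Since(start).Seconds())
		if err != nil {
			m.Failures.Inc()
		}
	}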
@@ -22,7 +22,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
 	"github.com/go-kit/log"
@@ -76,12 +76,19 @@ type DockerSDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*DockerSDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &dockerMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*DockerSDConfig) Name() string { return "docker" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDockerDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDockerDiscovery(c, opts.Logger, opts.Metrics)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -115,8 +122,11 @@ type DockerDiscovery struct {
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, reg prometheus.Registerer) (*DockerDiscovery, error) {
-	var err error
+func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
+	m, ok := metrics.(*dockerMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
 
 	d := &DockerDiscovery{
 		port: conf.Port,
@@ -167,11 +177,11 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, reg prometheus.
 
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "docker",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "docker",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
@@ -182,7 +192,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		Source: "Docker",
 	}
 
-	containers, err := d.client.ContainerList(ctx, types.ContainerListOptions{Filters: d.filters})
+	containers, err := d.client.ContainerList(ctx, container.ListOptions{Filters: d.filters})
 	if err != nil {
 		return nil, fmt.Errorf("error while listing containers: %w", err)
 	}

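Note: the two docker hunks above also track a Docker client API move: the container-list options type migrated from api/types (ContainerListOptions) to api/types/container (ListOptions). A minimal sketch of the new call shape; the wrapper function is illustrative only.

	package main

	import (
		"context"

		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/api/types/filters"
		"github.com/docker/docker/client"
	)

	// listContainers shows the moved options type used in the hunk above:
	// container.ListOptions replaces types.ContainerListOptions.
	func listContainers(ctx context.Context, cli *client.Client, f filters.Args) error {
		containers, err := cli.ContainerList(ctx, container.ListOptions{Filters: f})
		if err != nil {
			return err
		}
		_ = containers // a real caller builds target groups from these
		return nil
	}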
@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
 )
 
 func TestDockerSDRefresh(t *testing.T) {
@@ -38,7 +40,14 @@ host: %s
 	var cfg DockerSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()

@@ -70,12 +70,19 @@ type Filter struct {
 	Values []string `yaml:"values"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*DockerSwarmSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &dockerswarmMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*DockerSwarmSDConfig) Name() string { return "dockerswarm" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *DockerSwarmSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -118,8 +125,11 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
-	var err error
+func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*dockerswarmMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
 
 	d := &Discovery{
 		port: conf.Port,
@@ -170,11 +180,11 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, reg prometheus.R
 
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "dockerswarm",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "dockerswarm",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil

discovery/moby/metrics_docker.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package moby
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*dockerMetrics)(nil)
+
+type dockerMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *dockerMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *dockerMetrics) Unregister() {}

discovery/moby/metrics_dockerswarm.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package moby
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*dockerswarmMetrics)(nil)
+
+type dockerswarmMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *dockerswarmMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *dockerswarmMetrics) Unregister() {}

@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
 )
 
 func TestDockerSwarmNodesSDRefresh(t *testing.T) {
@@ -39,7 +41,14 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()

@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
 )
 
 func TestDockerSwarmSDServicesRefresh(t *testing.T) {
@@ -39,7 +41,14 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -333,7 +342,14 @@ filters:
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()

@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
 )
 
 func TestDockerSwarmTasksSDRefresh(t *testing.T) {
@@ -39,7 +41,14 @@ host: %s
 	var cfg DockerSwarmSDConfig
 	require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()

discovery/nomad/metrics.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nomad
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*nomadMetrics)(nil)
+
+type nomadMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+
+	failuresCount prometheus.Counter
+
+	metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	m := &nomadMetrics{
+		refreshMetrics: rmi,
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "prometheus_sd_nomad_failures_total",
+				Help: "Number of nomad service discovery refresh failures.",
+			}),
+	}
+
+	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+		m.failuresCount,
+	})
+
+	return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *nomadMetrics) Register() error {
+	return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *nomadMetrics) Unregister() {
+	m.metricRegisterer.UnregisterMetrics()
+}

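Note: nomadMetrics, like linodeMetrics earlier, is the counter-owning shape of the pattern: its collectors are wired through discovery.NewMetricRegisterer so that Register and Unregister stay one-line delegations. A sketch of that shape under hypothetical names (exampleMetrics and its counter name are illustrative, not from the diff):

	package example

	import (
		"github.com/prometheus/client_golang/prometheus"

		"github.com/prometheus/prometheus/discovery"
	)

	// exampleMetrics mirrors nomadMetrics: it owns a real collector and
	// delegates bulk (un)registration to a discovery.MetricRegisterer.
	type exampleMetrics struct {
		failuresCount    prometheus.Counter
		metricRegisterer discovery.MetricRegisterer
	}

	func newExampleMetrics(reg prometheus.Registerer) *exampleMetrics {
		m := &exampleMetrics{
			failuresCount: prometheus.NewCounter(prometheus.CounterOpts{
				Name: "example_sd_failures_total", // hypothetical metric name
				Help: "Hypothetical failure counter.",
			}),
		}
		m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{m.failuresCount})
		return m
	}

	func (m *exampleMetrics) Register() error { return m.metricRegisterer.RegisterMetrics() }
	func (m *exampleMetrics) Unregister()     { m.metricRegisterer.UnregisterMetrics() }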
@@ -74,12 +74,17 @@ type SDConfig struct {
 	TagSeparator string `yaml:"tag_separator,omitempty"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "nomad" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -112,11 +117,16 @@ type Discovery struct {
 	region        string
 	server        string
 	tagSeparator  string
-	failuresCount prometheus.Counter
+	metrics       *nomadMetrics
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*nomadMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		allowStale: conf.AllowStale,
 		namespace:  conf.Namespace,
@@ -124,11 +134,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 		region:       conf.Region,
 		server:       conf.Server,
 		tagSeparator: conf.TagSeparator,
-		failuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_nomad_failures_total",
-				Help: "Number of nomad service discovery refresh failures.",
-			}),
+		metrics:      m,
 	}
 
 	HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd")
@@ -151,12 +157,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "nomad",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
-			Metrics:  []prometheus.Collector{d.failuresCount},
+			Logger:              logger,
+			Mech:                "nomad",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
@@ -168,7 +173,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	}
 	stubs, _, err := d.client.Services().List(opts)
 	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
 		return nil, err
 	}
 
@@ -180,7 +185,7 @@ func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	for _, service := range stub.Services {
 		instances, _, err := d.client.Services().Get(service.ServiceName, opts)
 		if err != nil {
-			d.failuresCount.Inc()
+			d.metrics.failuresCount.Inc()
 			return nil, fmt.Errorf("failed to fetch services: %w", err)
 		}
 

@@ -25,6 +25,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery"
 )
 
 type NomadSDTestSuite struct {
@@ -128,8 +130,16 @@ func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
 		Server: "http://localhost:4646",
 	}
-	_, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	_, err := NewDiscovery(conf, nil, metrics)
 	require.NoError(t, err)
+
+	metrics.Unregister()
 }
 
 func TestNomadSDRefresh(t *testing.T) {
@@ -142,7 +152,15 @@ func TestNomadSDRefresh(t *testing.T) {
 
 	cfg := DefaultSDConfig
 	cfg.Server = endpoint.String()
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	tgs, err := d.refresh(context.Background())

discovery/openstack/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openstack
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+type openstackMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+var _ discovery.DiscovererMetrics = (*openstackMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (m *openstackMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *openstackMetrics) Unregister() {}

@@ -66,12 +66,19 @@ type SDConfig struct {
 	Availability string `yaml:"availability,omitempty"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &openstackMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "openstack" }
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -135,18 +142,23 @@ type refresher interface {
 }
 
 // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, l log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*openstackMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	r, err := newRefresher(conf, l)
 	if err != nil {
 		return nil, err
 	}
 	return refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   l,
-			Mech:     "openstack",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: r.refresh,
-			Registry: reg,
+			Logger:              l,
+			Mech:                "openstack",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            r.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	), nil
 }

discovery/ovhcloud/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ovhcloud
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*ovhcloudMetrics)(nil)
+
+type ovhcloudMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *ovhcloudMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *ovhcloudMetrics) Unregister() {}

@@ -53,6 +53,13 @@ type SDConfig struct {
 	Service string `yaml:"service"`
 }
 
+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &ovhcloudMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
 // Name implements the Discoverer interface.
 func (c SDConfig) Name() string {
 	return "ovhcloud"
@@ -94,7 +101,7 @@ func createClient(config *SDConfig) (*ovh.Client, error) {
 
 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, options.Logger, options.Registerer)
+	return NewDiscovery(c, options.Logger, options.Metrics)
 }
 
 func init() {
@@ -141,7 +148,12 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
 }
 
 // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*ovhcloudMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	r, err := newRefresher(conf, logger)
 	if err != nil {
 		return nil, err
@@ -149,11 +161,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 
 	return refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "ovhcloud",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: r.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "ovhcloud",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            r.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	), nil
 }

@@ -122,9 +122,17 @@ func TestParseIPs(t *testing.T) {
 func TestDiscoverer(t *testing.T) {
 	conf, _ := getMockConf("vps")
 	logger := testutil.NewLogger(t)
+
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+	defer metrics.Unregister()
+	defer refreshMetrics.Unregister()
+
 	_, err := conf.NewDiscoverer(discovery.DiscovererOptions{
-		Logger:     logger,
-		Registerer: prometheus.NewRegistry(),
+		Logger:  logger,
+		Metrics: metrics,
 	})
 
 	require.NoError(t, err)

discovery/puppetdb/metrics.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package puppetdb
+
+import (
+	"github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*puppetdbMetrics)(nil)
+
+type puppetdbMetrics struct {
+	refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *puppetdbMetrics) Register() error {
+	return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *puppetdbMetrics) Unregister() {}

@ -79,12 +79,19 @@ type SDConfig struct {
|
|||
Port int `yaml:"port"`
|
||||
}
|
||||
|
||||
// NewDiscovererMetrics implements discovery.Config.
|
||||
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
|
||||
return &puppetdbMetrics{
|
||||
refreshMetrics: rmi,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the Config.
|
||||
func (*SDConfig) Name() string { return "puppetdb" }
|
||||
|
||||
// NewDiscoverer returns a Discoverer for the Config.
|
||||
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
|
||||
return NewDiscovery(c, opts.Logger, opts.Registerer)
|
||||
return NewDiscovery(c, opts.Logger, opts.Metrics)
|
||||
}
|
||||
|
||||
// SetDirectory joins any relative file paths with dir.
|
||||
|
@ -131,7 +138,12 @@ type Discovery struct {
|
|||
}
|
||||
|
||||
// NewDiscovery returns a new PuppetDB discovery for the given config.
|
||||
func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
|
||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*puppetdbMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
logger = log.NewNopLogger()
|
||||
}
|
||||
|
@@ -158,11 +170,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
 	d.Discovery = refresh.NewDiscovery(
 		refresh.Options{
-			Logger:   logger,
-			Mech:     "http",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "puppetdb",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
 		},
 	)
 	return d, nil
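Besides the move from a raw Registry to the shared MetricsInstantiator, note that Mech changes from "http" to "puppetdb". Assuming Mech feeds the mechanism label of the prometheus_sd_refresh_* series, as it does for other refresh-based SDs, that rename is user-visible to dashboards filtering on the label. A hedged sketch of counting those samples on a test registry (the family names are assumed, not shown in this diff):

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// countRefreshSamples gathers reg and counts samples for the two assumed
// refresh metric families; 0 means nothing was instantiated there.
func countRefreshSamples(reg *prometheus.Registry) (int, error) {
	return testutil.GatherAndCount(reg,
		"prometheus_sd_refresh_duration_seconds", // assumed family name
		"prometheus_sd_refresh_failures_total",   // assumed family name
	)
}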
discovery/puppetdb/puppetdb_test.go
@@ -28,6 +28,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -63,9 +64,17 @@ func TestPuppetSlashInURL(t *testing.T) {
 			Port:            80,
 			RefreshInterval: model.Duration(30 * time.Second),
 		}
-		d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+
+		reg := prometheus.NewRegistry()
+		refreshMetrics := discovery.NewRefreshMetrics(reg)
+		metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+		require.NoError(t, metrics.Register())
+
+		d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 		require.NoError(t, err)
 		require.Equal(t, apiURL, d.url)
+
+		metrics.Unregister()
 	}
 }
@@ -80,7 +89,12 @@ func TestPuppetDBRefresh(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -107,6 +121,8 @@ func TestPuppetDBRefresh(t *testing.T) {
 		},
 	}
 	require.Equal(t, expectedTargets, tgs)
+
+	metrics.Unregister()
 }

 func TestPuppetDBRefreshWithParameters(t *testing.T) {
@@ -121,7 +137,12 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -158,6 +179,8 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 		},
 	}
 	require.Equal(t, expectedTargets, tgs)
+
+	metrics.Unregister()
 }

 func TestPuppetDBInvalidCode(t *testing.T) {
@@ -173,12 +196,19 @@ func TestPuppetDBInvalidCode(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
 	_, err = d.refresh(ctx)
 	require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
+
+	metrics.Unregister()
 }

 func TestPuppetDBInvalidFormat(t *testing.T) {
@@ -194,10 +224,17 @@ func TestPuppetDBInvalidFormat(t *testing.T) {
 		RefreshInterval: model.Duration(30 * time.Second),
 	}

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+	require.NoError(t, metrics.Register())
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
 	_, err = d.refresh(ctx)
 	require.EqualError(t, err, "unsupported content type text/plain; charset=utf-8")
+
+	metrics.Unregister()
 }
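Every test above now repeats the same four-line setup plus a trailing Unregister. A hypothetical helper, not part of this diff, could fold that boilerplate and both Unregister calls into one place via t.Cleanup:

package puppetdb

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/discovery"
)

// newTestMetrics is a hypothetical helper consolidating the registry and
// metrics setup repeated in each test; cleanup runs automatically.
func newTestMetrics(t *testing.T, cfg discovery.Config) discovery.DiscovererMetrics {
	t.Helper()
	reg := prometheus.NewRegistry()
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
	require.NoError(t, metrics.Register())
	t.Cleanup(metrics.Unregister)
	t.Cleanup(refreshMetrics.Unregister)
	return metrics
}

With that, each test body would shrink to d, err := NewDiscovery(&cfg, log.NewNopLogger(), newTestMetrics(t, &cfg)).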