[PRW 2.0] Merging remote-write-2.0 feature branch to main (PRW 2.0 support + metadata in WAL) (#14395)
* Remote Write 1.1: e2e benchmarks (#13102) * Remote Write e2e benchmarks Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Prometheus ports automatically assigned Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * make dashboard editable + more modular to different job label values Signed-off-by: Callum Styan <callumstyan@gmail.com> * Dashboard improvements * memory stats * diffs look at counter increases Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * run script: absolute path for config templates Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * grafana dashboard improvements * show actual values of metrics * add memory stats and diff Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * dashboard changes Signed-off-by: Callum Styan <callumstyan@gmail.com> --------- Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Co-authored-by: Callum Styan <callumstyan@gmail.com> * replace snappy encoding library Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add new proto types Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add decode function for new write request proto Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add lookup table struct that is used to build the symbol table in new write request format Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Implement code paths for new proto format Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * update example server to include handler for new format Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Add new test client Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * tests and new -> original proto mapping util Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add new proto support on receiver end Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Fix test Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * no-brainer copypaste but more performance write support Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove some comented code Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix mocks and fixture Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add basic reduce remote write handler benchmark Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * refactor out common code between write methods Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix: queue manager to include float histograms in new requests Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add sender-side tests and fix failing ones Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * refactor queue manager code to remove some duplication Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix build Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Improve sender benchmarks and some allocations Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Use github.com/golang/snappy Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * cleanup: remove hardcoded fake url for testing Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Add 1.1 version handling code Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Remove 
config, update proto Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * gofmt Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix NewWriteClient and change new flags wording Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fields rewording in handler Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remote write handler to checks version header Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix typo in log Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * lint Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Add minmized remote write proto format Co-authored-by: Marco Pracucci <marco@pracucci.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add functions for translating between new proto formats symbol table and actual prometheus labels Co-authored-by: Marco Pracucci <marco@pracucci.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add functionality for new minimized remote write request format Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix minor things Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Make LabelSymbols a fixed32 Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove unused proto type Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * update tests Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix build for stringlabels tag Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Use two uint32 to encode (offset,leng) Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * manually optimize varint marshaling Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Use unsafe []byte->string cast to reuse buffer Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix writeRequestMinimizedFixture Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove all code from previous interning approach the 'minimized' version is now the only v1.1 version Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * minimally-tested exemplar support for rw 1.1 Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * refactor new version flag to make it easier to pick a specific format instead of having multiple flags, plus add new formats for testing Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * use exp slices for backwards compat. 
to go 1.20 plus add copyright header to test file Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix label ranging Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Add bytes slice (instead of slice of 32bit vars) format for testing Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * test additional len and lenbytes formats Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove mistaken package lock changes Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove formats we've decided not to use Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove more format types we probably won't use Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * More cleanup Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * use require instead of assert in custom marshal test Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * cleanup; remove some unused functions Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * more cleanup, mostly linting fixes Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove package-lock.json change again Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * more cleanup, address review comments Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix test panic Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix minor lint issue + use labels Range function since it looks like the tests fail to do `range labels.Labels` on CI Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * new interning format based on []string indeces Co-authored-by: bwplotka <bwplotka@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove all new rw formats but the []string one also adapt tests to the new format Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * cleanup rwSymbolTable Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * add some TODOs for later Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * don't reserve field 3 for new proto and add TODO Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix custom marshaling Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * lint Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * additional merge fixes Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * lint fixes Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * fix server example Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * revert package-lock.json changes Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * update example prometheus version Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * define separate proto types for remote write 2.0 Signed-off-by: Nicolás Pazos 
<npazosmendez@gmail.com> * lint Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * rename new proto types and move to separate pkg Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * update prometheus version for example Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * make proto Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * make Metadata not nullable Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * remove old MinSample proto message Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * change enum names to fit buf build recommend enum naming and lint rules Signed-off-by: Callum Styan <callumstyan@gmail.com> * remote: Added test for classic histogram grouping when sending rw; Fixed queue manager test delay. (#13421) Signed-off-by: bwplotka <bwplotka@gmail.com> * Remote write v2: metadata support in every write request (#13394) * Approach bundling metadata along with samples and exemplars Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com> * Add first test; rebase with main Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com> * Alternative approach: bundle metadata in TimeSeries protobuf Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com> * update go mod to match main branch Signed-off-by: Callum Styan <callumstyan@gmail.com> * fix after rebase Signed-off-by: Callum Styan <callumstyan@gmail.com> * we're not going to modify the 1.X format anymore Signed-off-by: Callum Styan <callumstyan@gmail.com> * Modify AppendMetadata based on the fact that we be putting metadata into timeseries Signed-off-by: Callum Styan <callumstyan@gmail.com> * Rename enums for remote write versions to something that makes more sense + remove the added `sendMetadata` flag. Signed-off-by: Callum Styan <callumstyan@gmail.com> * rename flag that enables writing of metadata records to the WAL Signed-off-by: Callum Styan <callumstyan@gmail.com> * additional clean up Signed-off-by: Callum Styan <callumstyan@gmail.com> * lint Signed-off-by: Callum Styan <callumstyan@gmail.com> * fix usage of require.Len Signed-off-by: Callum Styan <callumstyan@gmail.com> * some clean up from review comments Signed-off-by: Callum Styan <callumstyan@gmail.com> * more review fixes Signed-off-by: Callum Styan <callumstyan@gmail.com> --------- Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Co-authored-by: Paschalis Tsilias <paschalist0@gmail.com> * remote write 2.0: sync with `main` branch (#13510) * consoles: exclude iowait and steal from CPU Utilisation 'iowait' and 'steal' indicate specific idle/wait states, which shouldn't be counted into CPU Utilisation. Also see https://github.com/prometheus-operator/kube-prometheus/pull/796 and https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667. Per the iostat man page: %idle Show the percentage of time that the CPU or CPUs were idle and the system did not have an outstanding disk I/O request. %iowait Show the percentage of time that the CPU or CPUs were idle during which the system had an outstanding disk I/O request. %steal Show the percentage of time spent in involuntary wait by the virtual CPU or CPUs while the hypervisor was servicing another virtual processor. Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com> * tsdb: shrink txRing with smaller integers 4 billion active transactions ought to be enough for anyone. 
Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * tsdb: create isolation transaction slice on demand When Prometheus restarts it creates every series read in from the WAL, but many of those series will be finished, and never receive any more samples. By defering allocation of the txRing slice to when it is first needed, we save 32 bytes per stale series. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * add cluster variable to Overview dashboard Signed-off-by: Erik Sommer <ersotech@posteo.de> * promql: simplify Native Histogram arithmetics Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com> * Cut 2.49.0-rc.0 (#13270) * Cut 2.49.0-rc.0 Signed-off-by: bwplotka <bwplotka@gmail.com> * Removed the duplicate. Signed-off-by: bwplotka <bwplotka@gmail.com> --------- Signed-off-by: bwplotka <bwplotka@gmail.com> * Add unit protobuf parser Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * Go on adding protobuf parsing for unit Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292 Signed-off-by: machine424 <ayoubmrini424@gmail.com> * Get conditional right Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * Get VM Scale Set NIC (#13283) Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set VM NIC, because these use a different Resource ID format. Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()` instead. This needs both the scale set name and the instance ID, so add an `InstanceID` field to the `virtualMachine` struct. `InstanceID` is empty for a VM that isn't a ScaleSetVM. Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com> * Cut v2.49.0-rc.1 Signed-off-by: bwplotka <bwplotka@gmail.com> * Delete debugging lines, amend error message for unit Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * Correct order in error message Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * Consider storage.ErrTooOldSample as non-retryable Signed-off-by: Daniel Kerbel <nmdanny@gmail.com> * scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness Signed-off-by: machine424 <ayoubmrini424@gmail.com> * Avoid creating string for suffix, consider counters without _total suffix Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> * build(deps): bump github.com/prometheus/client_golang Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com> * build(deps): bump actions/setup-node from 3.8.1 to 4.0.1 Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](5e21ff4d9b...b39b52d121
) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] <support@github.com> * scripts: sort file list in embed directive Otherwise the resulting string depends on find, which afaict depends on the underlying filesystem. A stable file list make it easier to detect UI changes in downstreams that need to track UI assets. Signed-off-by: Jan Fajerski <jfajersk@redhat.com> * Fix DataTableProps['data'] for resultType string Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> * Fix handling of scalar and string in isHeatmapData Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> * build(deps): bump github.com/influxdata/influxdb Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4. - [Release notes](https://github.com/influxdata/influxdb/releases) - [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4) --- updated-dependencies: - dependency-name: github.com/influxdata/influxdb dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> * build(deps): bump github.com/prometheus/prometheus Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1. - [Release notes](https://github.com/prometheus/prometheus/releases) - [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1) --- updated-dependencies: - dependency-name: github.com/prometheus/prometheus dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> * Bump client_golang to v1.18.0 (#13373) Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Drop old inmemory samples (#13002) * Drop old inmemory samples Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Avoid copying timeseries when the feature is disabled Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Run gofmt Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Clarify docs Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Add more logging info Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Remove loggers Signed-off-by: Marc Tuduri <marctc@protonmail.com> * optimize function and add tests Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Simplify filter Signed-off-by: Marc Tuduri <marctc@protonmail.com> * rename var Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Update help info from metrics Signed-off-by: Marc Tuduri <marctc@protonmail.com> * use metrics to keep track of drop elements during buildWriteRequest Signed-off-by: Marc Tuduri <marctc@protonmail.com> * rename var in tests Signed-off-by: Marc Tuduri <marctc@protonmail.com> * pass time.Now as parameter Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Change buildwriterequest during retries Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Revert "Remove loggers" This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab. 
Signed-off-by: Marc Tuduri <marctc@protonmail.com> * use log level debug for loggers Signed-off-by: Marc Tuduri <marctc@protonmail.com> * Fix linter Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Remove noisy debug-level logs; add 'reason' label to drop metrics Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Remove accidentally committed files Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Propagate logger to buildWriteRequest to log dropped data Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Fix docs comment Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Make drop reason more specific Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Remove unnecessary pass of logger Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Use snake_case for reason label Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> * Fix dropped samples metric Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> --------- Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> * fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go. The loop ran indefinitely if the condition isn't met. Before, each iteration created a new timer channel which was always outpaced by the other timer channel with smaller duration. minor detail: There was a memory leak: resources of the ~10 previous timers were constantly kept. With the fix, we may keep the resources of one timer around for defaultWait but this isn't worth the changes to make it right. Signed-off-by: machine424 <ayoubmrini424@gmail.com> * Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData ui: fix handling of scalar and string in isHeatmapData * tsdb/{index,compact}: allow using custom postings encoding format (#13242) * tsdb/{index,compact}: allow using custom postings encoding format We would like to experiment with a different postings encoding format in Thanos so in this change I am proposing adding another argument to `NewWriter` which would allow users to change the format if needed. Also, wire the leveled compactor so that it would be possible to change the format there too. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * tsdb/compact: use a struct for leveled compactor options As discussed on Slack, let's use a struct for the options in leveled compactor. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * tsdb: make changes after Bryan's review - Make changes less intrusive - Turn the postings encoder type into a function - Add NewWriterWithEncoder() Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> --------- Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * Cut 2.49.0-rc.2 Signed-off-by: bwplotka <bwplotka@gmail.com> * build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](6edd4406fa...0c52d547c9
) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](407ffafae6...012739e508
) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * put @nexucis as a release shepherd (#13383) Signed-off-by: Augustin Husson <augustin.husson@amadeus.com> * Add analyze histograms command to promtool (#12331) Add `query analyze` command to promtool. This command analyzes the buckets of classic and native histograms, based on data queried from the Prometheus query API, i.e. it doesn't require direct access to the TSDB files. Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> --------- Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> * included instance in all necessary descriptions Signed-off-by: Erik Sommer <ersotech@posteo.de> * tsdb/compact: fix passing merge func Fixing a very small logical problem I've introduced :(. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * tsdb: add enable overlapping compaction This functionality is needed in downstream projects because they have a separate component that does compaction. Upstreaming 7c8e9a2a76/tsdb/compact.go (L323-L325)
. Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * Cut 2.49.0 Signed-off-by: bwplotka <bwplotka@gmail.com> * promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296) Conditions are ANDed inside the same matcher but matchers are ORed Including unit tests for "promtool tsdb dump". Refactor some matchers scraping utils. Signed-off-by: machine424 <ayoubmrini424@gmail.com> * Fixed changelog Signed-off-by: bwplotka <bwplotka@gmail.com> * tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398) This added the https://github.com/prometheus/prometheus/pull/13393 "EnableOverlappingCompaction" parameter to the compactor code but not to the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and set it to `true` in Prometheus. Copy/paste the description from https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986 Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> * Issue #13268: fix quality value in accept header Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com> * Cut 2.49.1 with scrape q= bugfix. Signed-off-by: bwplotka <bwplotka@gmail.com> * Cut 2.49.1 web package. Signed-off-by: bwplotka <bwplotka@gmail.com> * Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022) Restore more efficient version of NewPossibleNonCounterInfo annotation Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> --------- Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> * Fix regressions introduced by #13242 Signed-off-by: Marco Pracucci <marco@pracucci.com> * fix slice copy in 1.20 (#13389) The slices package is added to the standard library in Go 1.21; we need to import from the exp area to maintain compatibility with Go 1.20. Signed-off-by: tyltr <tylitianrui@126.com> * Docs: Query Basics: link to rate (#10538) Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu> * chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations Signed-off-by: machine424 <ayoubmrini424@gmail.com> * Examples: link to `rate` for new users (#10535) * Examples: link to `rate` for new users Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com Co-authored-by: Bryan Boreham <bjboreham@gmail.com> * promql: use natural sort in sort_by_label and sort_by_label_desc (#13411) These functions are intended for humans, as robots can already sort the results however they please. 
Humans like things sorted "naturally": * https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/ A similar thing has been done to Grafana, which is also used by humans: * https://github.com/grafana/grafana/pull/78024 * https://github.com/grafana/grafana/pull/78494 Signed-off-by: Ivan Babrou <github@ivan.computer> * TestLabelValuesWithMatchers: Add test case Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * remove obsolete build tag Signed-off-by: tyltr <tylitianrui@126.com> * Upgrade some golang dependencies for resty 2.11 Signed-off-by: Israel Blancas <iblancasa@gmail.com> * Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222) Native Histograms: support native_histogram_min_bucket_factor in scrape_config --------- Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Signed-off-by: Björn Rabenstein <github@rabenste.in> Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> Co-authored-by: Björn Rabenstein <github@rabenste.in> * Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392) Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram --------- Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> * Minor fixes to otlp vendor update script Signed-off-by: Goutham <gouthamve@gmail.com> * build(deps): bump github.com/hetznercloud/hcloud-go/v2 Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0. - [Release notes](https://github.com/hetznercloud/hcloud-go/releases) - [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0) --- updated-dependencies: - dependency-name: github.com/hetznercloud/hcloud-go/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com> * Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342) * Added diff flag for unit test to improve readability & debugging Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Removed blank spaces Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Fixed linting error Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Added cli flags to documentation Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Revert unrelated linting fixes Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Fixed review suggestions Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Cleanup Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Updated flag description Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * Updated flag description Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> --------- Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> * storage: skip merging when no remote storage configured Prometheus is hard-coded to use a fanout storage between TSDB and a remote storage which by default is empty. This change detects the empty storage and skips merging between result sets, which would make `Select()` sort results. Bottom line: we skip a sort unless there really is some remote storage configured. 
Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * Remove csmarchbanks from remote write owners (#13432) I have not had the time to keep up with remote write and have no plans to work on it in the near future so I am withdrawing my maintainership of that part of the codebase. I continue to focus on client_python. Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com> * add more context cancellation check at evaluation time Signed-off-by: Ben Ye <benye@amazon.com> * Optimize label values with matchers by taking shortcuts (#13426) Don't calculate postings beforehand: we may not need them. If all matchers are for the requested label, we can just filter its values. Also, if there are no values at all, no need to run any kind of logic. Also add more labelValuesWithMatchers benchmarks Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com> * Add automatic memory limit handling Enable automatic detection of memory limits and configure GOMEMLIMIT to match. * Also includes a flag to allow controlling the reserved ratio. Signed-off-by: SuperQ <superq@gmail.com> * Update OSSF badge link (#13433) Provide a more user friendly interface Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com> * SD Managers taking over responsibility for registration of debug metrics (#13375) SD Managers take over responsibility for SD metrics registration --------- Signed-off-by: Paulin Todev <paulin.todev@gmail.com> Signed-off-by: Björn Rabenstein <github@rabenste.in> Co-authored-by: Björn Rabenstein <github@rabenste.in> * Optimize histogram iterators (#13340) Optimize histogram iterators Histogram iterators allocate new objects in the AtHistogram and AtFloatHistogram methods, which makes calculating rates over long ranges expensive. In #13215 we allowed an existing object to be reused when converting an integer histogram to a float histogram. This commit follows the same idea and allows injecting an existing object in the AtHistogram and AtFloatHistogram methods. When the injected value is nil, iterators allocate new histograms, otherwise they populate and return the injected object. The commit also adds a CopyTo method to Histogram and FloatHistogram which is used in the BufferedIterator to overwrite items in the ring instead of making new copies. Note that a specialized HPoint pool is needed for all of this to work (`matrixSelectorHPool`). --------- Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com> Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> * doc: Mark `mad_over_time` as experimental (#13440) We forgot to do that in https://github.com/prometheus/prometheus/pull/13059 Signed-off-by: beorn7 <beorn@grafana.com> * Change metric label for Puppetdb from 'http' to 'puppetdb' Signed-off-by: Paulin Todev <paulin.todev@gmail.com> * mirror metrics.proto change & generate code Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * TestHeadLabelValuesWithMatchers: Add test case (#13414) Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple of typos in other test cases. Also enclosing some implicit sub-tests in a `t.Run` call to make them explicitly sub-tests. Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * update all go dependencies (#13438) Signed-off-by: Augustin Husson <husson.augustin@gmail.com> * build(deps): bump the k8s-io group with 2 updates (#13454) Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go). 
Updates `k8s.io/api` from 0.28.4 to 0.29.1 - [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1) Updates `k8s.io/client-go` from 0.28.4 to 0.29.1 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-io - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-io ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump the go-opentelemetry-io group with 1 update (#13453) Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](a8a3f3ad30...c7d193f32e
) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump bufbuild/buf-push-action (#13357) Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459. - [Release notes](https://github.com/bufbuild/buf-push-action/releases) - [Commits](342fc4cdcf...a654ff18ef
) --- updated-dependencies: - dependency-name: bufbuild/buf-push-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Labels: Add DropMetricName function, used in PromQL (#13446) This function is called very frequently when executing PromQL functions, and we can do it much more efficiently inside Labels. In the common case that `__name__` comes first in the labels, we simply re-point to start at the next label, which is nearly free. `DropMetricName` is now so cheap I removed the cache - benchmarks show everything still goes faster. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * tsdb: simplify internal series delete function (#13261) Lifting an optimisation from Agent code, `seriesHashmap.del` can use the unique series reference, doesn't need to check Labels. Also streamline the logic for deleting from `unique` and `conflicts` maps, and add some comments to help the next person. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * otlptranslator/update-copy.sh: Fix sed command lines Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Rollback k8s.io requirements (#13462) Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to 1.21. This allows us to keep compatibility with the currently supported upstream Go releases. Signed-off-by: SuperQ <superq@gmail.com> * Make update-copy.sh work for both OSX and GNU sed Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Name @beorn7 and @krajorama as maintainers for native histograms I have been the de-facto maintainer for native histograms from the beginning. So let's put this into MAINTAINERS.md. In addition, I hereby propose George Krajcsovits AKA Krajo as a co-maintainer. He has contributed a lot of native histogram code, but more importantly, he has contributed substantially to reviewing other contributors' native histogram code, up to a point where I was merely rubberstamping the PRs he had already reviewed. I'm confident that he is ready to be granted commit rights as outlined in the "Maintainers" section of the governance: https://prometheus.io/governance/#maintainers According to the same section of the governance, I will announce the proposed change on the developers mailing list and will give some time for lazy consensus before merging this PR. Signed-off-by: beorn7 <beorn@grafana.com> * ui/fix: correct url handling for stacked graphs (#13460) Signed-off-by: Yury Moladau <yurymolodov@gmail.com> * tsdb: use cheaper Mutex on series Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since `RLock` is only used in two places, `UpdateMetadata` and `Delete`, neither of which are hotspots, we should use the cheaper one. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * Fix last_over_time for native histograms The last_over_time retains a histogram sample without making a copy. This sample is now coming from the buffered iterator used for windowing functions, and can be reused for reading subsequent samples as the iterator progresses. I would propose copying the sample in the last_over_time function, similar to how it is done for rate, sum_over_time and others. 
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com> * Implementation NOTE: Rebased from main after refactor in #13014 Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Add feature flag Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Refactor concurrency control Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Optimising dependencies/dependents funcs to not produce new slices each request Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Refactoring Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Rename flag Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Refactoring for performance, and to allow controller to be overridden Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Block until all rules, both sync & async, have completed evaluating Updated & added tests Review feedback nits Return empty map if not indeterminate Use highWatermark to track inflight requests counter Appease the linter Clarify feature flag Signed-off-by: Danny Kopping <danny.kopping@grafana.com> * Fix typo in CLI flag description Signed-off-by: Marco Pracucci <marco@pracucci.com> * Fixed auto-generated doc Signed-off-by: Marco Pracucci <marco@pracucci.com> * Improve doc Signed-off-by: Marco Pracucci <marco@pracucci.com> * Simplify the design to update concurrency controller once the rule evaluation is done Signed-off-by: Marco Pracucci <marco@pracucci.com> * Add more test cases to TestDependenciesEdgeCases Signed-off-by: Marco Pracucci <marco@pracucci.com> * Added more test cases to TestDependenciesEdgeCases Signed-off-by: Marco Pracucci <marco@pracucci.com> * Improved RuleConcurrencyController interface doc Signed-off-by: Marco Pracucci <marco@pracucci.com> * Introduced sequentialRuleEvalController Signed-off-by: Marco Pracucci <marco@pracucci.com> * Remove superfluous nil check in Group.metrics Signed-off-by: Marco Pracucci <marco@pracucci.com> * api: Serialize discovered and target labels into JSON directly (#13469) Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com> * api: Serialize discovered labels into JSON directly in dropped targets (#13484) Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com> * Add ShardedPostings() support to TSDB (#10421) This PR is a reference implementation of the proposal described in #10420. In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because it was built before stringlabels was a thing). Follow-up work: As mentioned in #10420, if this PR is accepted I'm also open to upload another fundamental piece used by Grafana Mimir query sharding to accelerate the query execution: an optional, configurable and fast in-memory cache for the series hashes. 
Signed-off-by: Marco Pracucci <marco@pracucci.com> * storage/remote: document why two benchmarks are skipped One was silently doing nothing; one was doing something but the work didn't go up linearly with iteration count. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> * Pod status changes not discovered by Kube Endpoints SD (#13337) * fix(discovery/kubernetes/endpoints): react to changes on Pods because some modifications can occur on them without triggering an update on the related Endpoints (The Pod phase changing from Pending to Running e.g.). --------- Signed-off-by: machine424 <ayoubmrini424@gmail.com> Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com> * Small improvements, add const, remove copypasta (#8106) Signed-off-by: Mikhail Fesenko <proggga@gmail.com> Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com> * Proposal to improve FPointSlice and HPointSlice allocation. (#13448) * Reusing points slice from previous series when the slice is under utilized * Adding comments on the bench test Signed-off-by: Alan Protasio <alanprot@gmail.com> * lint Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * go mod tidy Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> --------- Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com> Signed-off-by: Bryan Boreham <bjboreham@gmail.com> Signed-off-by: Erik Sommer <ersotech@posteo.de> Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com> Signed-off-by: bwplotka <bwplotka@gmail.com> Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> Signed-off-by: machine424 <ayoubmrini424@gmail.com> Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com> Signed-off-by: Daniel Kerbel <nmdanny@gmail.com> Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: Jan Fajerski <jfajersk@redhat.com> Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> Signed-off-by: Augustin Husson <augustin.husson@amadeus.com> Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com> Signed-off-by: Marco Pracucci <marco@pracucci.com> Signed-off-by: tyltr <tylitianrui@126.com> Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com Signed-off-by: Ivan Babrou <github@ivan.computer> Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> Signed-off-by: Israel Blancas <iblancasa@gmail.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Signed-off-by: Björn Rabenstein <github@rabenste.in> Signed-off-by: Goutham <gouthamve@gmail.com> Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com> Signed-off-by: Ben Ye <benye@amazon.com> Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com> Signed-off-by: SuperQ <superq@gmail.com> Signed-off-by: Ben Kochie <superq@gmail.com> Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com> Signed-off-by: Paulin Todev <paulin.todev@gmail.com> Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com> Signed-off-by: beorn7 <beorn@grafana.com> Signed-off-by: Augustin Husson <husson.augustin@gmail.com> Signed-off-by: Yury Moladau <yurymolodov@gmail.com> Signed-off-by: Danny Kopping <danny.kopping@grafana.com> Signed-off-by: Leegin 
<114397475+Leegin-darknight@users.noreply.github.com> Signed-off-by: Mikhail Fesenko <proggga@gmail.com> Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com> Signed-off-by: Alan Protasio <alanprot@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com> Co-authored-by: Bryan Boreham <bjboreham@gmail.com> Co-authored-by: Erik Sommer <ersotech@posteo.de> Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com> Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com> Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it> Co-authored-by: machine424 <ayoubmrini424@gmail.com> Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com> Co-authored-by: Daniel Kerbel <nmdanny@gmail.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jan Fajerski <jfajersk@redhat.com> Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> Co-authored-by: Marc Tudurí <marctc@protonmail.com> Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> Co-authored-by: Augustin Husson <husson.augustin@gmail.com> Co-authored-by: Björn Rabenstein <beorn@grafana.com> Co-authored-by: zenador <zenador@users.noreply.github.com> Co-authored-by: gotjosh <josue.abreu@gmail.com> Co-authored-by: Ben Kochie <superq@gmail.com> Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com> Co-authored-by: Marco Pracucci <marco@pracucci.com> Co-authored-by: tyltr <tylitianrui@126.com> Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com> Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu> Co-authored-by: Matthias Loibl <mail@matthiasloibl.com> Co-authored-by: Ivan Babrou <github@ivan.computer> Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> Co-authored-by: Israel Blancas <iblancasa@gmail.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> Co-authored-by: Björn Rabenstein <github@rabenste.in> Co-authored-by: Goutham <gouthamve@gmail.com> Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com> Co-authored-by: Ben Ye <benye@amazon.com> Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com> Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com> Co-authored-by: Paulin Todev <paulin.todev@gmail.com> Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com> Co-authored-by: Yury Molodov <yurymolodov@gmail.com> Co-authored-by: Danny Kopping <danny.kopping@grafana.com> Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com> Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com> Co-authored-by: Mikhail Fesenko <proggga@gmail.com> Co-authored-by: Alan Protasio <alanprot@gmail.com> * remote write 2.0 - follow up improvements (#13478) * move remote write proto version config from a remote storage config to a per remote write configuration option Signed-off-by: Callum Styan <callumstyan@gmail.com> * rename scrape config for metadata, fix 2.0 header var name/value (was 1.1), and more clean up Signed-off-by: Callum Styan <callumstyan@gmail.com> * address review comments, mostly lint fixes Signed-off-by: Callum Styan <callumstyan@gmail.com> * another lint fix Signed-off-by: Callum Styan <callumstyan@gmail.com> * lint imports 
Signed-off-by: Callum Styan <callumstyan@gmail.com> --------- Signed-off-by: Callum Styan <callumstyan@gmail.com> * go mod tidy Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * Added commentary to RW 2.0 protocol for easier adoption and explicit semantics. (#13502) * Added commentary to RW 2.0 protocol for easier adoption and explicit semantics. Signed-off-by: bwplotka <bwplotka@gmail.com> * Apply suggestions from code review Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> --------- Signed-off-by: bwplotka <bwplotka@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Co-authored-by: Callum Styan <callumstyan@gmail.com> Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com> * prw2.0: Added support for "custom" layouts for native histogram proto (#13558) * prw2.0: Added support for "custom" layouts for native histogram. Result of the discussions: * https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924 * https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199 Signed-off-by: bwplotka <bwplotka@gmail.com> * prw2.0: Added support for "custom" layouts for native histogram. Result of the discussions: * https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924 * https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199 Signed-off-by: bwplotka <bwplotka@gmail.com> # Conflicts: # prompb/write/v2/types.pb.go * Update prompb/write/v2/types.proto Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Addressed comments, fixed test. Signed-off-by: bwplotka <bwplotka@gmail.com> --------- Signed-off-by: bwplotka <bwplotka@gmail.com> Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> * first draft of content negotiation Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Lint Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Fix race in test Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Fix another test race Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Almost done with lint Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Fix todos around 405 HEAD handling Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Changes based on review comments Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Update storage/remote/client.go Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com> Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Latest updates to review comments Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * latest tweaks Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * remote write 2.0 - content negotiation remediation (#13921) * Consolidate renegotiation error into one, fix tests Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * fix metric name and actually increment counter Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> --------- Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> * Fixes after main sync. 
Signed-off-by: bwplotka <bwplotka@gmail.com> * [PRW 2.0] Moved rw2 proto to the full path (both package name and placement) (#13973) undefined * [PRW2.0] Remove benchmark scripts (#13949) See rationales on https://docs.google.com/document/d/1Bpf7mYjrHUhPHkie0qlnZFxzgqf_L32kM8ZOknSdJrU/edit Signed-off-by: bwplotka <bwplotka@gmail.com> * rw20: Update prw commentary after Callum spec review (#14136) * rw20: Update prw commentary after Callum spec review Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Update types.proto Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> --------- Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation. (#14330) * [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation. Signed-off-by: bwplotka <bwplotka@gmail.com> * update custom marshalling Signed-off-by: bwplotka <bwplotka@gmail.com> * Removed confusing comments. Signed-off-by: bwplotka <bwplotka@gmail.com> --------- Signed-off-by: bwplotka <bwplotka@gmail.com> * [PRW-2.0] (chain1) New Remote Write 2.0 Config options for 2.0-rc.1 spec. (#14335) NOTE: For simple review this change does not touch remote/ packages, only main and configs. Spec: https://prometheus.io/docs/specs/remote_write_spec_2_0 Supersedes https://github.com/prometheus/prometheus/pull/13968 Signed-off-by: bwplotka <bwplotka@gmail.com> * [PRW-2.0] (part 2) Removed automatic negotiation, updates for the latest spec semantics in remote pkg (#14329) * [PRW-2.0] (part2) Moved to latest basic negotiation & spec semantics. Spec: https://github.com/prometheus/docs/pull/2462 Supersedes https://github.com/prometheus/prometheus/pull/13968 Signed-off-by: bwplotka <bwplotka@gmail.com> # Conflicts: # config/config.go # docs/configuration/configuration.md # storage/remote/queue_manager_test.go # storage/remote/write.go # web/api/v1/api.go * Addressed comments. Signed-off-by: bwplotka <bwplotka@gmail.com> --------- Signed-off-by: bwplotka <bwplotka@gmail.com> * lint Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * storage/remote tests: refactor: extract function newTestQueueManager To reduce repetition. Signed-off-by: Bryan Boreham <bjboreham@gmail.com> Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * use newTestQueueManager for test Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * go mod tidy Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> * [PRW 2.0] (part3) moved type specific conversions to prompb and writev2 codecs. Signed-off-by: bwplotka <bwplotka@gmail.com> * Added test for rwProtoMsgFlagParser; fixed TODO comment. Signed-off-by: bwplotka <bwplotka@gmail.com> * Renamed DecodeV2WriteRequestStr to DecodeWriteV2Request (with tests). Signed-off-by: bwplotka <bwplotka@gmail.com> * Addressed comments on remote_storage example, updated it for 2.0 Signed-off-by: bwplotka <bwplotka@gmail.com> * Fixed `--enable-feature=metadata-wal-records` docs and error when using PRW 2.0 without it. Signed-off-by: bwplotka <bwplotka@gmail.com> * Addressed Callum comments on custom*.go Signed-off-by: bwplotka <bwplotka@gmail.com> * Added TODO to genproto. Signed-off-by: bwplotka <bwplotka@gmail.com> * Addressed Callum comments in remote pkg. Signed-off-by: bwplotka <bwplotka@gmail.com> * Added metadata validation to write handler test; fixed ToMetadata. Signed-off-by: bwplotka <bwplotka@gmail.com> * Addressed rest of Callum comments. 
Signed-off-by: bwplotka <bwplotka@gmail.com> * Fixed writev2.FromMetadataType (was wrongly using prompb). Signed-off-by: bwplotka <bwplotka@gmail.com> * fix a few import whitespaces Signed-off-by: Callum Styan <callumstyan@gmail.com> * add a default case with an error to the example RW receiver Signed-off-by: Callum Styan <callumstyan@gmail.com> * more minor import whitespace chagnes Signed-off-by: Callum Styan <callumstyan@gmail.com> * Apply suggestions from code review Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> * Update storage/remote/queue_manager_test.go Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> --------- Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com> Signed-off-by: Callum Styan <callumstyan@gmail.com> Signed-off-by: bwplotka <bwplotka@gmail.com> Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com> Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com> Signed-off-by: Bryan Boreham <bjboreham@gmail.com> Signed-off-by: Erik Sommer <ersotech@posteo.de> Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com> Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it> Signed-off-by: machine424 <ayoubmrini424@gmail.com> Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com> Signed-off-by: Daniel Kerbel <nmdanny@gmail.com> Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: Jan Fajerski <jfajersk@redhat.com> Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Signed-off-by: Marc Tuduri <marctc@protonmail.com> Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> Signed-off-by: Augustin Husson <augustin.husson@amadeus.com> Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com> Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com> Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com> Signed-off-by: Marco Pracucci <marco@pracucci.com> Signed-off-by: tyltr <tylitianrui@126.com> Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com Signed-off-by: Ivan Babrou <github@ivan.computer> Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> Signed-off-by: Israel Blancas <iblancasa@gmail.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Signed-off-by: Björn Rabenstein <github@rabenste.in> Signed-off-by: Goutham <gouthamve@gmail.com> Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com> Signed-off-by: Ben Ye <benye@amazon.com> Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com> Signed-off-by: SuperQ <superq@gmail.com> Signed-off-by: Ben Kochie <superq@gmail.com> Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com> Signed-off-by: Paulin Todev <paulin.todev@gmail.com> Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com> Signed-off-by: beorn7 <beorn@grafana.com> Signed-off-by: Augustin Husson <husson.augustin@gmail.com> Signed-off-by: Yury Moladau <yurymolodov@gmail.com> Signed-off-by: Danny Kopping <danny.kopping@grafana.com> Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com> Signed-off-by: Mikhail Fesenko <proggga@gmail.com> Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com> Signed-off-by: Alan Protasio <alanprot@gmail.com> Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com> Co-authored-by: Nicolás Pazos <32206519+npazosmendez@users.noreply.github.com> Co-authored-by: Callum Styan <callumstyan@gmail.com> Co-authored-by: Nicolás Pazos 
<npazosmendez@gmail.com> Co-authored-by: alexgreenbank <alex.greenbank@grafana.com> Co-authored-by: Marco Pracucci <marco@pracucci.com> Co-authored-by: Paschalis Tsilias <paschalist0@gmail.com> Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com> Co-authored-by: Bryan Boreham <bjboreham@gmail.com> Co-authored-by: Erik Sommer <ersotech@posteo.de> Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com> Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it> Co-authored-by: machine424 <ayoubmrini424@gmail.com> Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com> Co-authored-by: Daniel Kerbel <nmdanny@gmail.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jan Fajerski <jfajersk@redhat.com> Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com> Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com> Co-authored-by: Marc Tudurí <marctc@protonmail.com> Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com> Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com> Co-authored-by: Augustin Husson <husson.augustin@gmail.com> Co-authored-by: Björn Rabenstein <beorn@grafana.com> Co-authored-by: zenador <zenador@users.noreply.github.com> Co-authored-by: gotjosh <josue.abreu@gmail.com> Co-authored-by: Ben Kochie <superq@gmail.com> Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com> Co-authored-by: tyltr <tylitianrui@126.com> Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com> Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu> Co-authored-by: Matthias Loibl <mail@matthiasloibl.com> Co-authored-by: Ivan Babrou <github@ivan.computer> Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> Co-authored-by: Israel Blancas <iblancasa@gmail.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com> Co-authored-by: Björn Rabenstein <github@rabenste.in> Co-authored-by: Goutham <gouthamve@gmail.com> Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com> Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com> Co-authored-by: Ben Ye <benye@amazon.com> Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com> Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com> Co-authored-by: Paulin Todev <paulin.todev@gmail.com> Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com> Co-authored-by: Yury Molodov <yurymolodov@gmail.com> Co-authored-by: Danny Kopping <danny.kopping@grafana.com> Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com> Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com> Co-authored-by: Mikhail Fesenko <proggga@gmail.com> Co-authored-by: Alan Protasio <alanprot@gmail.com>
This commit is contained in:
parent c9bc1c2be0
commit 9198952f7c
@@ -194,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "extra-scrape-metrics":
 			c.scrape.ExtraMetrics = true
 			level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
+		case "metadata-wal-records":
+			c.scrape.AppendMetadata = true
+			level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
 		case "new-service-discovery-manager":
 			c.enableNewSDManager = true
 			level.Info(logger).Log("msg", "Experimental service discovery manager")
@@ -322,9 +325,15 @@ func main() {
 	a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
 		Default("false").BoolVar(&cfg.web.EnableAdminAPI)
 
+	// TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
+	// See https://github.com/prometheus/prometheus/issues/14410
 	a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
 		Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
 
+	supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
+	a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
+		Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
+
 	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
 		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
 
@@ -646,7 +655,7 @@ func main() {
 	var (
 		localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
 		scraper       = &readyScrapeManager{}
-		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
+		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
 		fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
 	)
 
@@ -1767,3 +1776,39 @@ type discoveryManager interface {
 	Run() error
 	SyncCh() <-chan map[string][]*targetgroup.Group
 }
+
+// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
+type rwProtoMsgFlagParser struct {
+	msgs *[]config.RemoteWriteProtoMsg
+}
+
+func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value {
+	return &rwProtoMsgFlagParser{msgs: msgs}
+}
+
+// IsCumulative is used by kingpin to tell if it's an array or not.
+func (p *rwProtoMsgFlagParser) IsCumulative() bool {
+	return true
+}
+
+func (p *rwProtoMsgFlagParser) String() string {
+	ss := make([]string, 0, len(*p.msgs))
+	for _, t := range *p.msgs {
+		ss = append(ss, string(t))
+	}
+	return strings.Join(ss, ",")
+}
+
+func (p *rwProtoMsgFlagParser) Set(opt string) error {
+	t := config.RemoteWriteProtoMsg(opt)
+	if err := t.Validate(); err != nil {
+		return err
+	}
+	for _, prev := range *p.msgs {
+		if prev == t {
+			return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
+		}
+	}
+	*p.msgs = append(*p.msgs, t)
+	return nil
+}
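For readers skimming the diff, here is a minimal sketch (not part of the change) of how the custom kingpin value above is intended to be used. It assumes `rwProtoMsgFlagValue` is reachable from the same package (it is unexported in `cmd/prometheus`); the program name and the `accepted-protobuf-messages` flag name are illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus/prometheus/config"
)

// demoAcceptedMsgs wires the custom kingpin.Value from this commit into a toy app:
// kingpin calls Set once per flag occurrence, unknown names fail Validate, and
// duplicated values are rejected with an error.
func demoAcceptedMsgs(args []string) ([]config.RemoteWriteProtoMsg, error) {
	app := kingpin.New("demo", "sketch of the accepted-protobuf-messages flag parser")

	var msgs []config.RemoteWriteProtoMsg
	app.Flag("accepted-protobuf-messages", "Remote write protobuf messages to accept.").
		Default(string(config.RemoteWriteProtoMsgV1), string(config.RemoteWriteProtoMsgV2)).
		SetValue(rwProtoMsgFlagValue(&msgs)) // helper added in this commit (assumed same package).

	if _, err := app.Parse(args); err != nil {
		return nil, err
	}
	return msgs, nil
}

func main() {
	msgs, err := demoAcceptedMsgs([]string{"--accepted-protobuf-messages", "io.prometheus.write.v2.Request"})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(msgs) // [io.prometheus.write.v2.Request]
}
```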
@@ -30,11 +30,13 @@ import (
 	"testing"
 	"time"
 
+	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/rules"
@@ -499,3 +501,65 @@ func TestDocumentation(t *testing.T) {
 
 	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
 }
+
+func TestRwProtoMsgFlagParser(t *testing.T) {
+	defaultOpts := config.RemoteWriteProtoMsgs{
+		config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
+	}
+
+	for _, tcase := range []struct {
+		args        []string
+		expected    []config.RemoteWriteProtoMsg
+		expectedErr error
+	}{
+		{
+			args:     nil,
+			expected: defaultOpts,
+		},
+		{
+			args:        []string{"--test-proto-msgs", "test"},
+			expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"),
+		},
+		{
+			args:     []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"},
+			expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2},
+		},
+		{
+			args: []string{
+				"--test-proto-msgs", "io.prometheus.write.v2.Request",
+				"--test-proto-msgs", "io.prometheus.write.v2.Request",
+			},
+			expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"),
+		},
+		{
+			args: []string{
+				"--test-proto-msgs", "io.prometheus.write.v2.Request",
+				"--test-proto-msgs", "prometheus.WriteRequest",
+			},
+			expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1},
+		},
+		{
+			args: []string{
+				"--test-proto-msgs", "io.prometheus.write.v2.Request",
+				"--test-proto-msgs", "prometheus.WriteRequest",
+				"--test-proto-msgs", "io.prometheus.write.v2.Request",
+			},
+			expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"),
+		},
+	} {
+		t.Run(strings.Join(tcase.args, ","), func(t *testing.T) {
+			a := kingpin.New("test", "")
+			var opt []config.RemoteWriteProtoMsg
+			a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt))
+
+			_, err := a.Parse(tcase.args)
+			if tcase.expectedErr != nil {
+				require.Error(t, err)
+				require.Equal(t, tcase.expectedErr, err)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tcase.expected, opt)
+			}
+		})
+	}
+}
@@ -180,6 +180,7 @@ var (
 	// DefaultRemoteWriteConfig is the default remote write configuration.
 	DefaultRemoteWriteConfig = RemoteWriteConfig{
 		RemoteTimeout:    model.Duration(30 * time.Second),
+		ProtobufMessage:  RemoteWriteProtoMsgV1,
 		QueueConfig:      DefaultQueueConfig,
 		MetadataConfig:   DefaultMetadataConfig,
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -279,7 +280,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 
 	jobNames := map[string]string{}
 	for i, scfg := range c.ScrapeConfigs {
-		// We do these checks for library users that would not call Validate in
+		// We do these checks for library users that would not call validate in
 		// Unmarshal.
 		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return nil, err
@@ -1055,6 +1056,49 @@ func CheckTargetAddress(address model.LabelValue) error {
 	return nil
 }
 
+// RemoteWriteProtoMsg represents the known protobuf message for the remote write
+// 1.0 and 2.0 specs.
+type RemoteWriteProtoMsg string
+
+// Validate returns error if the given reference for the protobuf message is not supported.
+func (s RemoteWriteProtoMsg) Validate() error {
+	switch s {
+	case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
+		return nil
+	default:
+		return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
+	}
+}
+
+type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
+
+func (m RemoteWriteProtoMsgs) Strings() []string {
+	ret := make([]string, 0, len(m))
+	for _, typ := range m {
+		ret = append(ret, string(typ))
+	}
+	return ret
+}
+
+func (m RemoteWriteProtoMsgs) String() string {
+	return strings.Join(m.Strings(), ", ")
+}
+
+var (
+	// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
+	// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+	//
+	// NOTE: This string is used for both HTTP header values and config value, so don't change
+	// this reference.
+	RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
+	// RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
+	// message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+	//
+	// NOTE: This string is used for both HTTP header values and config value, so don't change
+	// this reference.
+	RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
+)
+
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
 	URL *config.URL `yaml:"url"`
@@ -1064,6 +1108,9 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+	// ProtobufMessage specifies the protobuf message to use against the remote
+	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -1098,6 +1145,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}
 
+	if err := c.ProtobufMessage.Validate(); err != nil {
+		return fmt.Errorf("invalid protobuf_message value: %w", err)
+	}
+
 	// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
 	// We cannot make it a pointer as the parser panics for inlined pointer structs.
 	// Thus we just do its validation here.
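As a quick illustration of the `RemoteWriteProtoMsg`/`Validate` API added above, the following standalone sketch (an assumption: built against this commit's `config` package, not part of the diff) shows which references pass and how an unknown one is reported:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// The two supported message references pass validation.
	fmt.Println(config.RemoteWriteProtoMsgV1.Validate()) // <nil>
	fmt.Println(config.RemoteWriteProtoMsgV2.Validate()) // <nil>

	// Anything else is rejected together with the supported list; this is what the
	// remote_write_wrong_msg.bad.yml fixture below exercises via UnmarshalYAML.
	err := config.RemoteWriteProtoMsg("io.prometheus.writet.v2.Request").Validate()
	fmt.Println(err)
}
```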
@@ -108,9 +108,10 @@ var expectedConf = &Config{
 
 	RemoteWriteConfigs: []*RemoteWriteConfig{
 		{
 			URL:             mustParseURL("http://remote1/push"),
-			RemoteTimeout: model.Duration(30 * time.Second),
-			Name:          "drop_expensive",
+			ProtobufMessage: RemoteWriteProtoMsgV1,
+			RemoteTimeout:   model.Duration(30 * time.Second),
+			Name:            "drop_expensive",
 			WriteRelabelConfigs: []*relabel.Config{
 				{
 					SourceLabels: model.LabelNames{"__name__"},
@@ -137,11 +138,12 @@ var expectedConf = &Config{
 			},
 		},
 		{
 			URL:             mustParseURL("http://remote2/push"),
-			RemoteTimeout:  model.Duration(30 * time.Second),
-			QueueConfig:    DefaultQueueConfig,
-			MetadataConfig: DefaultMetadataConfig,
-			Name:           "rw_tls",
+			ProtobufMessage: RemoteWriteProtoMsgV2,
+			RemoteTimeout:   model.Duration(30 * time.Second),
+			QueueConfig:     DefaultQueueConfig,
+			MetadataConfig:  DefaultMetadataConfig,
+			Name:            "rw_tls",
 			HTTPClientConfig: config.HTTPClientConfig{
 				TLSConfig: config.TLSConfig{
 					CertFile: filepath.FromSlash("testdata/valid_cert_file"),
@@ -1800,6 +1802,10 @@ var expectedErrors = []struct {
 		filename: "remote_write_authorization_header.bad.yml",
 		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
 	},
+	{
+		filename: "remote_write_wrong_msg.bad.yml",
+		errMsg:   `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
+	},
 	{
 		filename: "remote_write_url_missing.bad.yml",
 		errMsg:   `url for remote_write is empty`,
config/testdata/conf.good.yml (vendored)
@@ -37,6 +37,7 @@ remote_write:
       key_file: valid_key_file
 
   - url: http://remote2/push
+    protobuf_message: io.prometheus.write.v2.Request
     name: rw_tls
     tls_config:
      cert_file: valid_cert_file
config/testdata/remote_write_wrong_msg.bad.yml (vendored, new file)
@@ -0,0 +1,3 @@
+remote_write:
+  - url: localhost:9090
+    protobuf_message: io.prometheus.writet.v2.Request # typo in 'write"
@@ -26,6 +26,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--web.enable-lifecycle</code> | Enable shutdown and reload via HTTP request. | `false` |
 | <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
 | <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
+| <code class="text-nowrap">--web.remote-write-receiver.accepted-protobuf-messages</code> | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` |
 | <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
 | <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
 | <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
@@ -3575,6 +3575,17 @@ this functionality.
 # The URL of the endpoint to send samples to.
 url: <string>
 
+# protobuf message to use when writing to the remote write endpoint.
+#
+# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which
+# will be deprecated eventually.
+# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former,
+# by improving efficiency and sending metadata, created timestamp and native histograms by default.
+#
+# Before changing this value, consult with your remote storage provider (or test) what message it supports.
+# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request
+[ protobuf_message: <prometheus.WriteRequest | io.prometheus.write.v2.Request> | default = prometheus.WriteRequest ]
+
 # Timeout for requests to the remote write endpoint.
 [ remote_timeout: <duration> | default = 30s ]
 
@@ -3596,6 +3607,7 @@ write_relabel_configs:
 [ send_exemplars: <boolean> | default = false ]
 
 # Enables sending of native histograms, also known as sparse histograms, over remote write.
+# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
 [ send_native_histograms: <boolean> | default = false ]
 
 # Sets the `Authorization` header on every remote write request with the
@@ -3609,7 +3621,7 @@ basic_auth:
 # Optional `Authorization` header configuration.
 authorization:
   # Sets the authentication type.
-  [ type: <string> | default: Bearer ]
+  [ type: <string> | default = Bearer ]
   # Sets the credentials. It is mutually exclusive with
   # `credentials_file`.
   [ credentials: <secret> ]
@@ -3673,7 +3685,7 @@ tls_config:
 # contain port numbers.
 [ no_proxy: <string> ]
 # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <boolean> | default: false ]
+[ proxy_from_environment: <boolean> | default = false ]
 # Specifies headers to send to proxies during CONNECT requests.
 [ proxy_connect_header:
   [ <string>: [<secret>, ...] ] ]
@@ -3682,7 +3694,7 @@ tls_config:
 [ follow_redirects: <boolean> | default = true ]
 
 # Whether to enable HTTP2.
-[ enable_http2: <boolean> | default: true ]
+[ enable_http2: <boolean> | default = true ]
 
 # Configures the queue used to write to remote storage.
 queue_config:
@@ -3712,7 +3724,10 @@ queue_config:
 # which means that all samples are sent.
 [ sample_age_limit: <duration> | default = 0s ]
 
-# Configures the sending of series metadata to remote storage.
+# Configures the sending of series metadata to remote storage
+# if the `prometheus.WriteRequest` message was chosen. When
+# `io.prometheus.write.v2.Request` is used, metadata is always sent.
+#
 # Metadata configuration is subject to change at any point
 # or be removed in future releases.
 metadata_config:
@@ -224,3 +224,13 @@ When the `concurrent-rule-eval` feature flag is enabled, rules without any depen
 This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load.
 
 The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
+
+## Metadata WAL Records
+
+`--enable-feature=metadata-wal-records`
+
+When enabled, Prometheus will store metadata in-memory and keep track of
+metadata changes as WAL records on a per-series basis.
+
+This must be used if
+you are also using remote write 2.0 as it will only gather metadata from the WAL.
@@ -7,6 +7,7 @@ To use it:
 
 ```
 go build
+
 ./example_write_adapter
 ```
 
@@ -15,10 +16,19 @@ go build
 ```yaml
 remote_write:
   - url: "http://localhost:1234/receive"
+    protobuf_message: "io.prometheus.write.v2.Request"
 ```
 
-Then start Prometheus:
+or for deprecated Remote Write 1.0 message:
+
+```yaml
+remote_write:
+  - url: "http://localhost:1234/receive"
+    protobuf_message: "prometheus.WriteRequest"
+```
+
+Then start Prometheus (in separate terminal):
 
 ```
-./prometheus
+./prometheus --enable-feature=metadata-wal-records
 ```
 
@@ -18,44 +18,103 @@ import (
 	"log"
 	"net/http"
 
-	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage/remote"
 )
 
 func main() {
 	http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
-		req, err := remote.DecodeWriteRequest(r.Body)
-		if err != nil {
-			http.Error(w, err.Error(), http.StatusBadRequest)
+		enc := r.Header.Get("Content-Encoding")
+		if enc == "" {
+			http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
+			return
+		}
+		if enc != "snappy" {
+			http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
 			return
 		}
 
-		for _, ts := range req.Timeseries {
-			m := make(model.Metric, len(ts.Labels))
-			for _, l := range ts.Labels {
-				m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-			}
-			fmt.Println(m)
-
-			for _, s := range ts.Samples {
-				fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
-			}
-
-			for _, e := range ts.Exemplars {
-				m := make(model.Metric, len(e.Labels))
-				for _, l := range e.Labels {
-					m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-				}
-				fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
-			}
-
-			for _, hp := range ts.Histograms {
-				h := remote.HistogramProtoToHistogram(hp)
-				fmt.Printf("\tHistogram: %s\n", h.String())
-			}
+		contentType := r.Header.Get("Content-Type")
+		if contentType == "" {
+			http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
+		}
+
+		defer func() { _ = r.Body.Close() }()
+
+		// Very simplistic content parsing, see
+		// storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example.
+		switch contentType {
+		case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
+			req, err := remote.DecodeWriteRequest(r.Body)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			printV1(req)
+		case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
+			req, err := remote.DecodeWriteV2Request(r.Body)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			printV2(req)
+		default:
+			msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
+			fmt.Println(msg)
+			http.Error(w, msg, http.StatusBadRequest)
 		}
 	})
 
 	log.Fatal(http.ListenAndServe(":1234", nil))
 }
+
+func printV1(req *prompb.WriteRequest) {
+	b := labels.NewScratchBuilder(0)
+	for _, ts := range req.Timeseries {
+		fmt.Println(ts.ToLabels(&b, nil))
+
+		for _, s := range ts.Samples {
+			fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
+		}
+		for _, ep := range ts.Exemplars {
+			e := ep.ToExemplar(&b, nil)
+			fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
+		}
+		for _, hp := range ts.Histograms {
+			if hp.IsFloatHistogram() {
+				h := hp.ToFloatHistogram()
+				fmt.Printf("\tHistogram: %s\n", h.String())
+				continue
+			}
+			h := hp.ToIntHistogram()
+			fmt.Printf("\tHistogram: %s\n", h.String())
+		}
+	}
+}
+
+func printV2(req *writev2.Request) {
+	b := labels.NewScratchBuilder(0)
+	for _, ts := range req.Timeseries {
+		l := ts.ToLabels(&b, req.Symbols)
+		m := ts.ToMetadata(req.Symbols)
+		fmt.Println(l, m)
+
+		for _, s := range ts.Samples {
+			fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
+		}
+		for _, ep := range ts.Exemplars {
+			e := ep.ToExemplar(&b, req.Symbols)
+			fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
+		}
+		for _, hp := range ts.Histograms {
+			if hp.IsFloatHistogram() {
+				h := hp.ToFloatHistogram()
+				fmt.Printf("\tHistogram: %s\n", h.String())
+				continue
+			}
+			h := hp.ToIntHistogram()
+			fmt.Printf("\tHistogram: %s\n", h.String())
+		}
+	}
+}
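To exercise the updated example receiver above, a sender must set the snappy `Content-Encoding` and a versioned `Content-Type`. The sketch below is illustrative only and not part of the change: the empty body is a placeholder (the receiver will answer 400 rather than decode real samples), but it shows the headers that route a request to the `io.prometheus.write.v2.Request` branch.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder body; a real sender snappy-compresses a marshalled writev2.Request.
	body := bytes.NewReader([]byte{})

	req, err := http.NewRequest(http.MethodPost, "http://localhost:1234/receive", body)
	if err != nil {
		panic(err)
	}
	// These are the header values the example handler's switch statement looks for.
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 400 for the empty placeholder body
}
```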
@@ -17,10 +17,10 @@ require (
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
-	github.com/aws/aws-sdk-go v1.51.25 // indirect
+	github.com/aws/aws-sdk-go v1.53.16 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -31,8 +31,7 @@ require (
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -49,13 +48,12 @@ require (
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
-	go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
-	go.opentelemetry.io/collector/pdata v1.5.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.98.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
-	go.opentelemetry.io/otel v1.25.0 // indirect
-	go.opentelemetry.io/otel/metric v1.25.0 // indirect
-	go.opentelemetry.io/otel/trace v1.25.0 // indirect
+	go.opentelemetry.io/collector/pdata v1.8.0 // indirect
+	go.opentelemetry.io/collector/semconv v0.101.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+	go.opentelemetry.io/otel v1.27.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/otel/trace v1.27.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.24.0 // indirect
@@ -64,8 +62,8 @@ require (
 	golang.org/x/sys v0.21.0 // indirect
 	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
-	google.golang.org/grpc v1.63.2 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/grpc v1.64.0 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -82,4 +80,10 @@ exclude (
 	cloud.google.com/go v0.34.0
 	cloud.google.com/go v0.65.0
 	cloud.google.com/go v0.82.0
+
+	// Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules.
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
 )
+
+// TODO(bwplotka): Move to main branch commit or perhaps released version.
+replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c
@ -2,10 +2,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
|
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
|
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||||
|
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
|
||||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||||
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
|
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
|
||||||
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
||||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
|
||||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||||
github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
|
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
|
||||||
github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
|
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
|
||||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||||
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
|
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
|
||||||
github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||||
|
@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
|
||||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
|
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
|
||||||
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
|
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||||
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
|
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||||
|
@ -95,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
|
||||||
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
||||||
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||||
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||||
github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
|
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
|
||||||
github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
|
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
|
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
|
||||||
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||||
|
@@ -135,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
-github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
@@ -208,14 +206,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
-github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
@@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
+github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
-github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
+github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -279,12 +277,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
-github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -306,20 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
+go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
-go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
+go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
-go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
+go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
-go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
+go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
-go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
-go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
-go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
-go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
-go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
-go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
-go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
-go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -336,8 +332,8 @@ golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRj
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -397,21 +393,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
prompb/codec.go (new file, 201 lines)
@@ -0,0 +1,201 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prompb

import (
	"strings"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
)

// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
	return labelProtosToLabels(b, m.GetLabels())
}

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
	return labelProtosToLabels(b, m.GetLabels())
}

func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
	b.Reset()
	for _, l := range labelPairs {
		b.Add(l.Name, l.Value)
	}
	b.Sort()
	return b.Labels()
}

// FromLabels transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func FromLabels(lbls labels.Labels, buf []Label) []Label {
	result := buf[:0]
	lbls.Range(func(l labels.Label) {
		result = append(result, Label{
			Name:  l.Name,
			Value: l.Value,
		})
	})
	return result
}

// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
	mt := strings.ToUpper(string(t))
	v, ok := MetricMetadata_MetricType_value[mt]
	if !ok {
		return MetricMetadata_UNKNOWN
	}
	return MetricMetadata_MetricType(v)
}

// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
	_, ok := h.GetCount().(*Histogram_CountFloat)
	return ok
}

// ToIntHistogram returns integer Prometheus histogram from the remote implementation
// of integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
	if h.IsFloatHistogram() {
		return nil
	}
	return &histogram.Histogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        h.GetZeroCountInt(),
		Count:            h.GetCountInt(),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  h.GetPositiveDeltas(),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  h.GetNegativeDeltas(),
	}
}

// ToFloatHistogram returns float Prometheus histogram from the remote implementation
// of float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
	if h.IsFloatHistogram() {
		return &histogram.FloatHistogram{
			CounterResetHint: histogram.CounterResetHint(h.ResetHint),
			Schema:           h.Schema,
			ZeroThreshold:    h.ZeroThreshold,
			ZeroCount:        h.GetZeroCountFloat(),
			Count:            h.GetCountFloat(),
			Sum:              h.Sum,
			PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
			PositiveBuckets:  h.GetPositiveCounts(),
			NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
			NegativeBuckets:  h.GetNegativeCounts(),
		}
	}
	// Conversion from integer histogram.
	return &histogram.FloatHistogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        float64(h.GetZeroCountInt()),
		Count:            float64(h.GetCountInt()),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  deltasToCounts(h.GetPositiveDeltas()),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  deltasToCounts(h.GetNegativeDeltas()),
	}
}

func spansProtoToSpans(s []BucketSpan) []histogram.Span {
	spans := make([]histogram.Span, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountInt{CountInt: h.Count},
		Sum:            h.Sum,
		Schema:         h.Schema,
		ZeroThreshold:  h.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
		NegativeDeltas: h.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
		PositiveDeltas: h.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
		Timestamp:      timestamp,
	}
}

// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
		Sum:            fh.Sum,
		Schema:         fh.Schema,
		ZeroThreshold:  fh.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
		NegativeCounts: fh.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
		PositiveCounts: fh.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
		Timestamp:      timestamp,
	}
}

func spansToSpansProto(s []histogram.Span) []BucketSpan {
	spans := make([]BucketSpan, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

// ToExemplar converts remote exemplar to model exemplar.
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
	timestamp := m.Timestamp

	return exemplar.Exemplar{
		Labels: labelProtosToLabels(b, m.GetLabels()),
		Value:  m.Value,
		Ts:     timestamp,
		HasTs:  timestamp != 0,
	}
}
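For orientation (not part of the diff): a minimal sketch of how these prompb helpers round-trip a native histogram between the TSDB model and the remote-write wire type. The sample values and timestamp are invented for illustration only.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// An integer native histogram in the TSDB model (illustrative values).
	h := &histogram.Histogram{
		Schema:          0,
		Count:           4,
		Sum:             10,
		ZeroThreshold:   0.001,
		ZeroCount:       1,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []int64{2, -1}, // Deltas: absolute bucket counts are 2 and 1.
	}

	// Convert to the remote-write representation; the first argument is the sample timestamp in ms.
	remote := prompb.FromIntHistogram(1717000000000, h)

	// ToFloatHistogram also accepts the integer form and converts the deltas
	// to absolute counts internally (see deltasToCounts above).
	fh := remote.ToFloatHistogram()
	fmt.Println(fh.Count, fh.PositiveBuckets) // 4 [2 1]
}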
@@ -17,14 +17,6 @@ import (
	"sync"
)

-func (m Sample) T() int64   { return m.Timestamp }
-func (m Sample) V() float64 { return m.Value }
-
-func (h Histogram) IsFloatHistogram() bool {
-	_, ok := h.GetCount().(*Histogram_CountFloat)
-	return ok
-}
-
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
	size := r.Size()
	data, ok := p.Get().(*[]byte)
prompb/io/prometheus/write/v2/codec.go (new file, 213 lines)
@@ -0,0 +1,213 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
)

// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.

// ToLabels return model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
	return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
}

// ToMetadata return model metadata from timeseries' remote metadata.
func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
	typ := model.MetricTypeUnknown
	switch m.Metadata.Type {
	case Metadata_METRIC_TYPE_COUNTER:
		typ = model.MetricTypeCounter
	case Metadata_METRIC_TYPE_GAUGE:
		typ = model.MetricTypeGauge
	case Metadata_METRIC_TYPE_HISTOGRAM:
		typ = model.MetricTypeHistogram
	case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
		typ = model.MetricTypeGaugeHistogram
	case Metadata_METRIC_TYPE_SUMMARY:
		typ = model.MetricTypeSummary
	case Metadata_METRIC_TYPE_INFO:
		typ = model.MetricTypeInfo
	case Metadata_METRIC_TYPE_STATESET:
		typ = model.MetricTypeStateset
	}
	return metadata.Metadata{
		Type: typ,
		Unit: symbols[m.Metadata.UnitRef],
		Help: symbols[m.Metadata.HelpRef],
	}
}

// FromMetadataType transforms a Prometheus metricType into writev2 metricType.
// Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) Metadata_MetricType {
	switch t {
	case model.MetricTypeCounter:
		return Metadata_METRIC_TYPE_COUNTER
	case model.MetricTypeGauge:
		return Metadata_METRIC_TYPE_GAUGE
	case model.MetricTypeHistogram:
		return Metadata_METRIC_TYPE_HISTOGRAM
	case model.MetricTypeGaugeHistogram:
		return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
	case model.MetricTypeSummary:
		return Metadata_METRIC_TYPE_SUMMARY
	case model.MetricTypeInfo:
		return Metadata_METRIC_TYPE_INFO
	case model.MetricTypeStateset:
		return Metadata_METRIC_TYPE_STATESET
	default:
		return Metadata_METRIC_TYPE_UNSPECIFIED
	}
}

// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
	_, ok := h.GetCount().(*Histogram_CountFloat)
	return ok
}

// ToIntHistogram returns integer Prometheus histogram from the remote implementation
// of integer histogram. If it's a float histogram, the method returns nil.
// TODO(bwplotka): Add support for incoming NHCB.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
	if h.IsFloatHistogram() {
		return nil
	}
	return &histogram.Histogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        h.GetZeroCountInt(),
		Count:            h.GetCountInt(),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  h.GetPositiveDeltas(),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  h.GetNegativeDeltas(),
	}
}

// ToFloatHistogram returns float Prometheus histogram from the remote implementation
// of float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
// TODO(bwplotka): Add support for incoming NHCB.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
	if h.IsFloatHistogram() {
		return &histogram.FloatHistogram{
			CounterResetHint: histogram.CounterResetHint(h.ResetHint),
			Schema:           h.Schema,
			ZeroThreshold:    h.ZeroThreshold,
			ZeroCount:        h.GetZeroCountFloat(),
			Count:            h.GetCountFloat(),
			Sum:              h.Sum,
			PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
			PositiveBuckets:  h.GetPositiveCounts(),
			NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
			NegativeBuckets:  h.GetNegativeCounts(),
		}
	}
	// Conversion from integer histogram.
	return &histogram.FloatHistogram{
		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
		Schema:           h.Schema,
		ZeroThreshold:    h.ZeroThreshold,
		ZeroCount:        float64(h.GetZeroCountInt()),
		Count:            float64(h.GetCountInt()),
		Sum:              h.Sum,
		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
		PositiveBuckets:  deltasToCounts(h.GetPositiveDeltas()),
		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
		NegativeBuckets:  deltasToCounts(h.GetNegativeDeltas()),
	}
}

func spansProtoToSpans(s []BucketSpan) []histogram.Span {
	spans := make([]histogram.Span, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountInt{CountInt: h.Count},
		Sum:            h.Sum,
		Schema:         h.Schema,
		ZeroThreshold:  h.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
		NegativeDeltas: h.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
		PositiveDeltas: h.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
		Timestamp:      timestamp,
	}
}

// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
	return Histogram{
		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
		Sum:            fh.Sum,
		Schema:         fh.Schema,
		ZeroThreshold:  fh.ZeroThreshold,
		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
		NegativeCounts: fh.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
		PositiveCounts: fh.PositiveBuckets,
		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
		Timestamp:      timestamp,
	}
}

func spansToSpansProto(s []histogram.Span) []BucketSpan {
	spans := make([]BucketSpan, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
	timestamp := m.Timestamp

	return exemplar.Exemplar{
		Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
		Value:  m.Value,
		Ts:     timestamp,
		HasTs:  timestamp != 0,
	}
}
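Not part of the diff: a hedged sketch of how symbol references in a writev2 TimeSeries resolve back into labels and metadata via ToLabels/ToMetadata. The import path matches the new package location added above; the metric name, symbol values and the assumption that Metadata is embedded as a value (gogoproto non-nullable) are illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	// Symbols table as it would arrive in a Request; index 0 must be "".
	symbols := []string{"", "__name__", "http_requests_total", "Total HTTP requests.", "requests"}

	ts := writev2.TimeSeries{
		// Label name/value pairs as references into symbols: {__name__="http_requests_total"}.
		LabelsRefs: []uint32{1, 2},
		Metadata: writev2.Metadata{
			Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
			HelpRef: 3,
			UnitRef: 4,
		},
	}

	b := labels.NewScratchBuilder(2)
	fmt.Println(ts.ToLabels(&b, symbols)) // {__name__="http_requests_total"}

	meta := ts.ToMetadata(symbols)
	fmt.Println(meta.Type, meta.Unit, meta.Help) // counter requests Total HTTP requests.
}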
prompb/io/prometheus/write/v2/custom.go (new file, 165 lines)
@@ -0,0 +1,165 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"slices"
)

func (m Sample) T() int64   { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }

func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
	siz := m.Size()
	if cap(dst) < siz {
		dst = make([]byte, siz)
	}
	n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}

// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but calls OptimizedMarshalToSizedBuffer on the timeseries.
func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Timeseries) > 0 {
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x2a
		}
	}
	if len(m.Symbols) > 0 {
		for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Symbols[iNdEx])
			copy(dAtA[i:], m.Symbols[iNdEx])
			i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
			i--
			dAtA[i] = 0x22
		}
	}
	return len(dAtA) - i, nil
}

// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but marshals m.LabelsRefs in place without extra allocations.
func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.CreatedTimestamp != 0 {
		i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
		i--
		dAtA[i] = 0x30
	}
	{
		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
		if err != nil {
			return 0, err
		}
		i -= size
		i = encodeVarintTypes(dAtA, i, uint64(size))
	}
	i--
	dAtA[i] = 0x2a
	if len(m.Histograms) > 0 {
		for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x1a
		}
	}
	if len(m.Exemplars) > 0 {
		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x22
		}
	}
	if len(m.Samples) > 0 {
		for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintTypes(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}

	if len(m.LabelsRefs) > 0 {
		// This is the trick: encode the varints in reverse order to make it easier
		// to do it in place. Then reverse the whole thing.
		var j10 int
		start := i
		for _, num := range m.LabelsRefs {
			for num >= 1<<7 {
				dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				i--
				j10++
			}
			dAtA[i-1] = uint8(num)
			i--
			j10++
		}
		slices.Reverse(dAtA[i:start])
		// --- end of trick

		i = encodeVarintTypes(dAtA, i, uint64(j10))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
prompb/io/prometheus/write/v2/custom_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestOptimizedMarshal(t *testing.T) {
	for _, tt := range []struct {
		name string
		m    *Request
	}{
		{
			name: "empty",
			m:    &Request{},
		},
		{
			name: "simple",
			m: &Request{
				Timeseries: []TimeSeries{
					{
						LabelsRefs: []uint32{
							0, 1,
							2, 3,
							4, 5,
							6, 7,
							8, 9,
							10, 11,
							12, 13,
							14, 15,
						},

						Samples:    []Sample{{Value: 1, Timestamp: 0}},
						Exemplars:  []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}},
						Histograms: nil,
					},
					{
						LabelsRefs: []uint32{
							0, 1,
							2, 3,
							4, 5,
							6, 7,
							8, 9,
							10, 11,
							12, 13,
							14, 15,
						},
						Samples:    []Sample{{Value: 2, Timestamp: 1}},
						Exemplars:  []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}},
						Histograms: nil,
					},
				},
				Symbols: []string{
					"a", "b",
					"c", "d",
					"e", "f",
					"g", "h",
					"i", "j",
					"k", "l",
					"m", "n",
					"o", "p",
				},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Keep the slice allocated to mimic what std Marshal
			// would give to sized Marshal.
			got := make([]byte, 0)

			// Should be the same as the standard marshal.
			expected, err := tt.m.Marshal()
			require.NoError(t, err)
			got, err = tt.m.OptimizedMarshal(got)
			require.NoError(t, err)
			require.Equal(t, expected, got)

			// Unmarshal should work too.
			m := &Request{}
			require.NoError(t, m.Unmarshal(got))
			require.Equal(t, tt.m, m)
		})
	}
}
prompb/io/prometheus/write/v2/symbols.go (new file, 83 lines)
@@ -0,0 +1,83 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import "github.com/prometheus/prometheus/model/labels"

// SymbolsTable implements table for easy symbol use.
type SymbolsTable struct {
	strings    []string
	symbolsMap map[string]uint32
}

// NewSymbolTable returns a symbol table.
func NewSymbolTable() SymbolsTable {
	return SymbolsTable{
		// Empty string is required as a first element.
		symbolsMap: map[string]uint32{"": 0},
		strings:    []string{""},
	}
}

// Symbolize adds (if not added before) a string to the symbols table,
// while returning its reference number.
func (t *SymbolsTable) Symbolize(str string) uint32 {
	if ref, ok := t.symbolsMap[str]; ok {
		return ref
	}
	ref := uint32(len(t.strings))
	t.strings = append(t.strings, str)
	t.symbolsMap[str] = ref
	return ref
}

// SymbolizeLabels symbolize Prometheus labels.
func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
	result := buf[:0]
	lbls.Range(func(l labels.Label) {
		off := t.Symbolize(l.Name)
		result = append(result, off)
		off = t.Symbolize(l.Value)
		result = append(result, off)
	})
	return result
}

// Symbols returns computes symbols table to put in e.g. Request.Symbols.
// As per spec, order does not matter.
func (t *SymbolsTable) Symbols() []string {
	return t.strings
}

// Reset clears symbols table.
func (t *SymbolsTable) Reset() {
	// NOTE: Make sure to keep empty symbol.
	t.strings = t.strings[:1]
	for k := range t.symbolsMap {
		if k == "" {
			continue
		}
		delete(t.symbolsMap, k)
	}
}

// desymbolizeLabels decodes label references, with given symbols to labels.
func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
	b.Reset()
	for i := 0; i < len(labelRefs); i += 2 {
		b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
	}
	b.Sort()
	return b.Labels()
}
prompb/io/prometheus/write/v2/symbols_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
)

func TestSymbolsTable(t *testing.T) {
	s := NewSymbolTable()
	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
	require.Equal(t, uint32(0), s.Symbolize(""))
	require.Equal(t, []string{""}, s.Symbols())

	require.Equal(t, uint32(1), s.Symbolize("abc"))
	require.Equal(t, []string{"", "abc"}, s.Symbols())

	require.Equal(t, uint32(2), s.Symbolize("__name__"))
	require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())

	require.Equal(t, uint32(3), s.Symbolize("foo"))
	require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())

	s.Reset()
	require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
	require.Equal(t, uint32(0), s.Symbolize(""))

	require.Equal(t, uint32(1), s.Symbolize("__name__"))
	require.Equal(t, []string{"", "__name__"}, s.Symbols())

	require.Equal(t, uint32(2), s.Symbolize("abc"))
	require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())

	ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
	encoded := s.SymbolizeLabels(ls, nil)
	require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
	b := labels.NewScratchBuilder(len(encoded))
	decoded := desymbolizeLabels(&b, encoded, s.Symbols())
	require.Equal(t, ls, decoded)

	// Different buf.
	ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
	encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
	require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
}
prompb/io/prometheus/write/v2/types.pb.go (new file, 3241 lines)
(File diff suppressed because it is too large.)

prompb/io/prometheus/write/v2/types.proto (new file, 260 lines)
@@ -0,0 +1,260 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2

syntax = "proto3";
package io.prometheus.write.v2;

option go_package = "writev2";

import "gogoproto/gogo.proto";

// Request represents a request to write the given timeseries to a remote destination.
// This message was introduced in the Remote Write 2.0 specification:
// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
//
// The canonical Content-Type request header value for this message is
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
//
// NOTE: gogoproto options might change in future for this file, they
// are not part of the spec proto (they only modify the generated Go code, not
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
message Request {
  // Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message
  // for the deterministic interop between those two, see types_test.go for details.
  // Generally it's not needed, because Receivers must use the Content-Type header, but we want to
  // be sympathetic to adopters with mistaken implementations and have deterministic error (empty
  // message if you use the wrong proto schema).
  reserved 1 to 3;

  // symbols contains a de-duplicated array of string elements used for various
  // items in a Request message, like labels and metadata items. For the sender's convenience
  // around empty values for optional fields like unit_ref, symbols array MUST start with
  // empty string.
  //
  // To decode each of the symbolized strings, referenced by the "ref(s)" suffix, you
  // need to lookup the actual string by index from symbols array. The order of
  // strings is up to the sender. The receiver should not assume any particular encoding.
  repeated string symbols = 4;
  // timeseries represents an array of distinct series with 0 or more samples.
  repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
}

// TimeSeries represents a single series.
message TimeSeries {
  // labels_refs is a list of label name-value pair references, encoded
  // as indices to the Request.symbols array. This list's length is always
  // a multiple of two, and the underlying labels should be sorted lexicographically.
  //
  // Note that there might be multiple TimeSeries objects in the same
  // Requests with the same labels e.g. for different exemplars, metadata
  // or created timestamp.
  repeated uint32 labels_refs = 1;

  // Timeseries messages can either specify samples or (native) histogram samples
  // (histogram field), but not both. For a typical sender (real-time metric
  // streaming), in healthy cases, there will be only one sample or histogram.
  //
  // Samples and histograms are sorted by timestamp (older first).
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 3 [(gogoproto.nullable) = false];

  // exemplars represents an optional set of exemplars attached to this series' samples.
  repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];

  // metadata represents the metadata associated with the given series' samples.
  Metadata metadata = 5 [(gogoproto.nullable) = false];

  // created_timestamp represents an optional created timestamp associated with
  // this series' samples in ms format, typically for counter or histogram type
  // metrics. Created timestamp represents the time when the counter started
  // counting (sometimes referred to as start timestamp), which can increase
  // the accuracy of query results.
  //
  // Note that some receivers might require this and in return fail to
  // ingest such samples within the Request.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  //
  // Note that the "optional" keyword is omitted due to
  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
  // Zero value means value not set. If you need to use exactly zero value for
  // the timestamp, use 1 millisecond before or after.
  int64 created_timestamp = 6;
}

// Exemplar is additional information attached to some series' samples.
// It is typically used to attach an example trace or request ID associated with
// the metric changes.
message Exemplar {
  // labels_refs is an optional list of label name-value pair references, encoded
  // as indices to the Request.symbols array. This list's len is always
  // a multiple of 2, and the underlying labels should be sorted lexicographically.
  // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
  repeated uint32 labels_refs = 1;
  // value represents an exact example value. This can be useful when the exemplar
  // is attached to a histogram, which only gives an estimated value through buckets.
  double value = 2;
  // timestamp represents an optional timestamp of the sample in ms.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  //
  // Note that the "optional" keyword is omitted due to
  // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
  // Zero value means value not set. If you need to use exactly zero value for
  // the timestamp, use 1 millisecond before or after.
  int64 timestamp = 3;
}

// Sample represents series sample.
message Sample {
  // value of the sample.
  double value = 1;
  // timestamp represents timestamp of the sample in ms.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

// Metadata represents the metadata associated with the given series' samples.
message Metadata {
  enum MetricType {
    METRIC_TYPE_UNSPECIFIED    = 0;
    METRIC_TYPE_COUNTER        = 1;
    METRIC_TYPE_GAUGE          = 2;
    METRIC_TYPE_HISTOGRAM      = 3;
    METRIC_TYPE_GAUGEHISTOGRAM = 4;
    METRIC_TYPE_SUMMARY        = 5;
    METRIC_TYPE_INFO           = 6;
    METRIC_TYPE_STATESET       = 7;
  }
  MetricType type = 1;
  // help_ref is a reference to the Request.symbols array representing help
  // text for the metric. Help is optional, reference should point to an empty string in
  // such a case.
  uint32 help_ref = 3;
  // unit_ref is a reference to the Request.symbols array representing a unit
  // for the metric. Unit is optional, reference should point to an empty string in
  // such a case.
  uint32 unit_ref = 4;
}

// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both, the usual
// integer histogram as well as a float histogram.
message Histogram {
  enum ResetHint {
    RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
    RESET_HINT_YES         = 1; // This is the 1st histogram after a counter reset.
    RESET_HINT_NO          = 2; // There was no counter reset between this and the previous Histogram.
    RESET_HINT_GAUGE       = 3; // This is a gauge histogram where counter resets don't happen.
  }

  oneof count { // Count of observations in the histogram.
    uint64 count_int = 1;
    double count_float = 2;
  }
  double sum = 3; // Sum of observations in the histogram.

  // The schema defines the bucket schema. Currently, valid numbers
  // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
  // added in future for new bucketing layouts.
  //
  // The schema equal to -53 means custom buckets. See
  // custom_values field description for more details.
  //
  // Values between -4 and 8 represent base-2 bucket schema, where 1
  // is a bucket boundary in each case, and then each power of two is
  // divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
  // each bucket boundary is the previous boundary times 2^(2^-n).
  sint32 schema = 4;
  double zero_threshold = 5; // Breadth of the zero bucket.
  oneof zero_count { // Count in zero bucket.
    uint64 zero_count_int = 6;
    double zero_count_float = 7;
  }

  // Negative Buckets.
  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
  // Use either "negative_deltas" or "negative_counts", the former for
  // regular histograms with integer counts, the latter for
  // float histograms.
  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_counts = 10; // Absolute count of each bucket.

  // Positive Buckets.
  //
  // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
  // * The span offset+length points to the index of the custom_values array
  //   or +Inf if pointing to the len of the array.
  // * The counts and deltas have the same meaning as for exponential histograms.
  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
  // Use either "positive_deltas" or "positive_counts", the former for
  // regular histograms with integer counts, the latter for
  // float histograms.
  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_counts = 13; // Absolute count of each bucket.

  ResetHint reset_hint = 14;
  // timestamp represents timestamp of the sample in ms.
  //
  // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
  // for conversion from/to time.Time to Prometheus timestamp.
  int64 timestamp = 15;

  // custom_values is an additional field used by non-exponential bucketing layouts.
  //
  // For custom buckets (-53 schema value) custom_values specify monotonically
  // increasing upper inclusive boundaries for the bucket counts with arbitrary
  // widths for this histogram. In other words, custom_values represents custom,
  // explicit bucketing that could have been converted from the classic histograms.
  //
  // Those bounds are then referenced by spans in positive_spans with corresponding positive
  // counts of deltas (refer to positive_spans for more details). This way we can
  // encode sparse histograms with custom bucketing (many buckets are often
  // not used).
  //
  // Note that for custom bounds, even negative observations are placed in the positive
  // counts to simplify the implementation and avoid ambiguity of where to place
  // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
  // the zero bucket are unused, if the schema indicates custom bucketing.
  //
  // For each upper boundary the previous boundary represents the lower exclusive
  // boundary for that bucket. The first element is the upper inclusive boundary
  // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
  // This is similar to "le" label semantics on classic histograms. You may add a
  // bucket with an upper bound of 0 to make sure that you really have no negative
  // observations, but in practice, native histogram rendering will show both with
  // or without first upper boundary 0 and no negative counts as the same case.
  //
  // The last element is not only the upper inclusive bound of the last regular
  // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
  repeated double custom_values = 16;
}

// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}
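To make the symbols/refs indirection concrete, here is a hedged sketch (illustrative values only, not part of the spec or this patch) of a minimal Request built with the Go types generated from this proto:

package main

import (
  "fmt"

  "github.com/prometheus/prometheus/model/labels"
  writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
  st := writev2.NewSymbolTable() // Symbols() always starts with the required "" at index 0.
  series := labels.FromStrings("__name__", "app_errors_total", "job", "demo")

  req := &writev2.Request{
    Timeseries: []writev2.TimeSeries{{
      LabelsRefs: st.SymbolizeLabels(series, nil), // Name/value pairs as indices into symbols.
      Samples:    []writev2.Sample{{Value: 42, Timestamp: 1719000000000}},
      Metadata: writev2.Metadata{
        Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
        HelpRef: st.Symbolize("Total number of errors."), // Optional; ref 0 (empty string) means unset.
        UnitRef: 0,
      },
    }},
  }
  req.Symbols = st.Symbols() // Attach the de-duplicated strings before marshalling.
  fmt.Println(len(req.Symbols), req.Timeseries[0].LabelsRefs)
}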
prompb/io/prometheus/write/v2/types_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package writev2

import (
  "testing"
  "time"

  "github.com/gogo/protobuf/proto"
  "github.com/stretchr/testify/require"

  "github.com/prometheus/prometheus/prompb"
)

func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) {
  expectedV1Empty := &prompb.WriteRequest{}
  for _, tc := range []struct{ incoming *Request }{
    {
      incoming: &Request{}, // Technically wrong, should be at least empty string in symbol.
    },
    {
      incoming: &Request{
        Symbols: []string{""},
      }, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element.
    },
    {
      incoming: &Request{
        Symbols: []string{"", "__name__", "metric1"},
        Timeseries: []TimeSeries{
          {LabelsRefs: []uint32{1, 2}},
          {Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}},
        }, // NOTE: Without reserved fields, proto: illegal wireType 7
      },
    },
  } {
    t.Run("", func(t *testing.T) {
      in, err := proto.Marshal(tc.incoming)
      require.NoError(t, err)

      // Test accidental unmarshal of v2 payload with v1 proto.
      out := &prompb.WriteRequest{}
      require.NoError(t, proto.Unmarshal(in, out))

      // Drop unknowns, we expect them when incoming payload had some fields.
      // This field & method will be likely gone after gogo removal.
      out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.

      require.Equal(t, expectedV1Empty, out)
    })
  }
}

func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) {
  expectedV2Empty := &Request{}
  for _, tc := range []struct{ incoming *prompb.WriteRequest }{
    {
      incoming: &prompb.WriteRequest{},
    },
    {
      incoming: &prompb.WriteRequest{
        Timeseries: []prompb.TimeSeries{
          {
            Labels:  []prompb.Label{{Name: "__name__", Value: "metric1"}},
            Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}},
          },
        },
      },
      // NOTE: Without reserved fields, results in corrupted v2.Request.Symbols.
    },
  } {
    t.Run("", func(t *testing.T) {
      in, err := proto.Marshal(tc.incoming)
      require.NoError(t, err)

      // Test accidental unmarshal of v1 payload with v2 proto.
      out := &Request{}
      require.NoError(t, proto.Unmarshal(in, out))

      // Drop unknowns, we expect them when incoming payload had some fields.
      // This field & method will be likely gone after gogo removal.
      out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.

      require.Equal(t, expectedV2Empty, out)
    })
  }
}
prompb/rwcommon/codec_test.go (new file, 282 lines)
@@ -0,0 +1,282 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rwcommon

import (
  "testing"

  "github.com/prometheus/common/model"
  "github.com/stretchr/testify/require"

  "github.com/prometheus/prometheus/model/histogram"
  "github.com/prometheus/prometheus/model/labels"
  "github.com/prometheus/prometheus/model/metadata"
  "github.com/prometheus/prometheus/prompb"
  writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func TestToLabels(t *testing.T) {
  expected := labels.FromStrings("__name__", "metric1", "foo", "bar")

  t.Run("v1", func(t *testing.T) {
    ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}}
    b := labels.NewScratchBuilder(2)
    require.Equal(t, expected, ts.ToLabels(&b, nil))
    require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil))
    require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels))
  })
  t.Run("v2", func(t *testing.T) {
    v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"}
    ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}}
    b := labels.NewScratchBuilder(2)
    require.Equal(t, expected, ts.ToLabels(&b, v2Symbols))
    // No need for FromLabels in our prod code as we use symbol table to do so.
  })
}

func TestFromMetadataType(t *testing.T) {
  for _, tc := range []struct {
    desc       string
    input      model.MetricType
    expectedV1 prompb.MetricMetadata_MetricType
    expectedV2 writev2.Metadata_MetricType
  }{
    {
      desc:       "with a single-word metric",
      input:      model.MetricTypeCounter,
      expectedV1: prompb.MetricMetadata_COUNTER,
      expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER,
    },
    {
      desc:       "with a two-word metric",
      input:      model.MetricTypeStateset,
      expectedV1: prompb.MetricMetadata_STATESET,
      expectedV2: writev2.Metadata_METRIC_TYPE_STATESET,
    },
    {
      desc:       "with an unknown metric",
      input:      "not-known",
      expectedV1: prompb.MetricMetadata_UNKNOWN,
      expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED,
    },
  } {
    t.Run(tc.desc, func(t *testing.T) {
      t.Run("v1", func(t *testing.T) {
        require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input))
      })
      t.Run("v2", func(t *testing.T) {
        require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input))
      })
    })
  }
}

func TestToMetadata(t *testing.T) {
  sym := writev2.NewSymbolTable()

  for _, tc := range []struct {
    input    writev2.Metadata
    expected metadata.Metadata
  }{
    {
      input: writev2.Metadata{},
      expected: metadata.Metadata{
        Type: model.MetricTypeUnknown,
      },
    },
    {
      input: writev2.Metadata{
        Type: 12414, // Unknown.
      },
      expected: metadata.Metadata{
        Type: model.MetricTypeUnknown,
      },
    },
    {
      input: writev2.Metadata{
        Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
        HelpRef: sym.Symbolize("help1"),
        UnitRef: sym.Symbolize("unit1"),
      },
      expected: metadata.Metadata{
        Type: model.MetricTypeCounter,
        Help: "help1",
        Unit: "unit1",
      },
    },
    {
      input: writev2.Metadata{
        Type:    writev2.Metadata_METRIC_TYPE_STATESET,
        HelpRef: sym.Symbolize("help2"),
      },
      expected: metadata.Metadata{
        Type: model.MetricTypeStateset,
        Help: "help2",
      },
    },
  } {
    t.Run("", func(t *testing.T) {
      ts := writev2.TimeSeries{Metadata: tc.input}
      require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols()))
    })
  }
}

func TestToHistogram_Empty(t *testing.T) {
  t.Run("v1", func(t *testing.T) {
    require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
    require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
  })
  t.Run("v2", func(t *testing.T) {
    require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
    require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
  })
}

func testIntHistogram() histogram.Histogram {
  return histogram.Histogram{
    CounterResetHint: histogram.GaugeType,
    Schema:           0,
    Count:            19,
    Sum:              2.7,
    ZeroThreshold:    1e-128,
    PositiveSpans: []histogram.Span{
      {Offset: 0, Length: 4},
      {Offset: 0, Length: 0},
      {Offset: 0, Length: 3},
    },
    PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
    NegativeSpans: []histogram.Span{
      {Offset: 0, Length: 5},
      {Offset: 1, Length: 0},
      {Offset: 0, Length: 1},
    },
    NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
  }
}

func testFloatHistogram() histogram.FloatHistogram {
  return histogram.FloatHistogram{
    CounterResetHint: histogram.GaugeType,
    Schema:           0,
    Count:            19,
    Sum:              2.7,
    ZeroThreshold:    1e-128,
    PositiveSpans: []histogram.Span{
      {Offset: 0, Length: 4},
      {Offset: 0, Length: 0},
      {Offset: 0, Length: 3},
    },
    PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
    NegativeSpans: []histogram.Span{
      {Offset: 0, Length: 5},
      {Offset: 1, Length: 0},
      {Offset: 0, Length: 1},
    },
    NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
  }
}

func TestFromIntToFloatOrIntHistogram(t *testing.T) {
  testIntHist := testIntHistogram()
  testFloatHist := testFloatHistogram()

  t.Run("v1", func(t *testing.T) {
    h := prompb.FromIntHistogram(123, testIntHist.Copy())
    require.False(t, h.IsFloatHistogram())
    require.Equal(t, int64(123), h.Timestamp)
    require.Equal(t, testIntHist, *h.ToIntHistogram())
    require.Equal(t, testFloatHist, *h.ToFloatHistogram())
  })
  t.Run("v2", func(t *testing.T) {
    h := writev2.FromIntHistogram(123, testIntHist.Copy())
    require.False(t, h.IsFloatHistogram())
    require.Equal(t, int64(123), h.Timestamp)
    require.Equal(t, testIntHist, *h.ToIntHistogram())
    require.Equal(t, testFloatHist, *h.ToFloatHistogram())
  })
}

func TestFromFloatToFloatHistogram(t *testing.T) {
  testFloatHist := testFloatHistogram()

  t.Run("v1", func(t *testing.T) {
    h := prompb.FromFloatHistogram(123, testFloatHist.Copy())
    require.True(t, h.IsFloatHistogram())
    require.Equal(t, int64(123), h.Timestamp)
    require.Nil(t, h.ToIntHistogram())
    require.Equal(t, testFloatHist, *h.ToFloatHistogram())
  })
  t.Run("v2", func(t *testing.T) {
    h := writev2.FromFloatHistogram(123, testFloatHist.Copy())
    require.True(t, h.IsFloatHistogram())
    require.Equal(t, int64(123), h.Timestamp)
    require.Nil(t, h.ToIntHistogram())
    require.Equal(t, testFloatHist, *h.ToFloatHistogram())
  })
}

func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) {
  for _, tc := range []struct {
    input      histogram.CounterResetHint
    expectedV1 prompb.Histogram_ResetHint
    expectedV2 writev2.Histogram_ResetHint
  }{
    {
      input:      histogram.UnknownCounterReset,
      expectedV1: prompb.Histogram_UNKNOWN,
      expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED,
    },
    {
      input:      histogram.CounterReset,
      expectedV1: prompb.Histogram_YES,
      expectedV2: writev2.Histogram_RESET_HINT_YES,
    },
    {
      input:      histogram.NotCounterReset,
      expectedV1: prompb.Histogram_NO,
      expectedV2: writev2.Histogram_RESET_HINT_NO,
    },
    {
      input:      histogram.GaugeType,
      expectedV1: prompb.Histogram_GAUGE,
      expectedV2: writev2.Histogram_RESET_HINT_GAUGE,
    },
  } {
    t.Run("", func(t *testing.T) {
      t.Run("v1", func(t *testing.T) {
        h := testIntHistogram()
        h.CounterResetHint = tc.input
        got := prompb.FromIntHistogram(1337, &h)
        require.Equal(t, tc.expectedV1, got.GetResetHint())

        fh := testFloatHistogram()
        fh.CounterResetHint = tc.input
        got2 := prompb.FromFloatHistogram(1337, &fh)
        require.Equal(t, tc.expectedV1, got2.GetResetHint())
      })
      t.Run("v2", func(t *testing.T) {
        h := testIntHistogram()
        h.CounterResetHint = tc.input
        got := writev2.FromIntHistogram(1337, &h)
        require.Equal(t, tc.expectedV2, got.GetResetHint())

        fh := testFloatHistogram()
        fh.CounterResetHint = tc.input
        got2 := writev2.FromFloatHistogram(1337, &fh)
        require.Equal(t, tc.expectedV2, got2.GetResetHint())
      })
    })
  }
}
@@ -73,9 +73,11 @@ type Options struct {
 	// Option used by downstream scraper users like OpenTelemetry Collector
 	// to help lookup metric metadata. Should be false for Prometheus.
 	PassMetadataInContext bool
-	// Option to enable the experimental in-memory metadata storage and append
-	// metadata to the WAL.
-	EnableMetadataStorage bool
+	// Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders
+	// can decide what to do with metadata, but for practical purposes this flag exists so that metadata
+	// can be written to the WAL and thus read for remote write.
+	// TODO: implement some form of metadata storage
+	AppendMetadata bool
 	// Option to increase the interval used by scrape manager to throttle target groups updates.
 	DiscoveryReloadInterval model.Duration
 	// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
@@ -181,7 +181,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
 		options.EnableNativeHistogramsIngestion,
 		options.EnableCreatedTimestampZeroIngestion,
 		options.ExtraMetrics,
-		options.EnableMetadataStorage,
+		options.AppendMetadata,
 		opts.target,
 		options.PassMetadataInContext,
 		metrics,
@@ -10,8 +10,9 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
 	exit 255
 fi
 
+# TODO(bwplotka): Move to buf, this is not OSS agnostic, likely won't work locally.
 if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
-	echo "could not find protoc 3.15.8, is it installed + in PATH?"
+	echo "could not find protoc 3.15.8, is it installed + in PATH? Consider commenting out this check for local flow"
 	exit 255
 fi
 
@@ -40,6 +41,9 @@ for dir in ${DIRS}; do
 		-I="${PROM_PATH}" \
 		-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
 		./*.proto
+	protoc --gogofast_out=plugins=grpc:. -I=. \
+		-I="${GOGOPROTO_PATH}" \
+		./io/prometheus/write/v2/*.proto
 	protoc --gogofast_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,paths=source_relative:. -I=. \
 		-I="${GOGOPROTO_PATH}" \
 		./io/prometheus/client/*.proto
@@ -35,13 +35,40 @@ import (
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/trace"
 
+	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage/remote/azuread"
 )
 
 const maxErrMsgLen = 1024
 
-var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+const (
+	RemoteWriteVersionHeader        = "X-Prometheus-Remote-Write-Version"
+	RemoteWriteVersion1HeaderValue  = "0.1.0"
+	RemoteWriteVersion20HeaderValue = "2.0.0"
+	appProtoContentType             = "application/x-protobuf"
+)
+
+// Compression represents the encoding. Currently remote storage supports only
+// one, but we experiment with more, thus leaving the compression scaffolding
+// for now.
+// NOTE(bwplotka): Keeping it public, as a non-stable help for importers to use.
+type Compression string
+
+const (
+	// SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt
+	SnappyBlockCompression Compression = "snappy"
+)
+
+var (
+	// UserAgent represents Prometheus version to use for user agent header.
+	UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+
+	remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
+		config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
+		config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
+	}
+)
+
 var (
 	remoteReadQueriesTotal = prometheus.NewCounterVec(
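The mapping above is what drives content negotiation on the wire. A hedged, standalone illustration of the resulting header values (the strings come from this diff; the helper and its string arguments are hypothetical, not part of the client):

package main

import "fmt"

// contentTypeFor mirrors remoteWriteContentTypeHeaders above, for illustration only.
func contentTypeFor(v2 bool) string {
	const appProto = "application/x-protobuf"
	if v2 {
		return appProto + ";proto=io.prometheus.write.v2.Request"
	}
	return appProto // 1.x compatibility: no proto parameter.
}

func main() {
	fmt.Println(contentTypeFor(false)) // application/x-protobuf
	fmt.Println(contentTypeFor(true))  // application/x-protobuf;proto=io.prometheus.write.v2.Request
}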
@@ -93,6 +120,9 @@ type Client struct {
 	readQueries         prometheus.Gauge
 	readQueriesTotal    *prometheus.CounterVec
 	readQueriesDuration prometheus.Observer
+
+	writeProtoMsg    config.RemoteWriteProtoMsg
+	writeCompression Compression // Not exposed by ClientConfig for now.
 }
 
 // ClientConfig configures a client.
@@ -104,6 +134,7 @@ type ClientConfig struct {
 	AzureADConfig    *azuread.AzureADConfig
 	Headers          map[string]string
 	RetryOnRateLimit bool
+	WriteProtoMsg    config.RemoteWriteProtoMsg
 }
 
 // ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@@ -162,14 +193,20 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
 		}
 	}
 
-	httpClient.Transport = otelhttp.NewTransport(t)
+	writeProtoMsg := config.RemoteWriteProtoMsgV1
+	if conf.WriteProtoMsg != "" {
+		writeProtoMsg = conf.WriteProtoMsg
+	}
+
+	httpClient.Transport = otelhttp.NewTransport(t)
 	return &Client{
 		remoteName:       name,
 		urlString:        conf.URL.String(),
 		Client:           httpClient,
 		retryOnRateLimit: conf.RetryOnRateLimit,
 		timeout:          time.Duration(conf.Timeout),
+		writeProtoMsg:    writeProtoMsg,
+		writeCompression: SnappyBlockCompression,
 	}, nil
 }
 
@@ -206,10 +243,16 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
 		return err
 	}
 
-	httpReq.Header.Add("Content-Encoding", "snappy")
-	httpReq.Header.Set("Content-Type", "application/x-protobuf")
+	httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
+	httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
 	httpReq.Header.Set("User-Agent", UserAgent)
-	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+	if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
+		// Compatibility mode for 1.0.
+		httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
+	} else {
+		httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+	}
+
 	if attempt > 0 {
 		httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
 	}
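For readers implementing their own sender, this is roughly what the resulting request looks like on the wire; a hedged sketch using only header names and values visible in this diff (the endpoint URL and body are placeholders):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// body would be a snappy-compressed, proto-marshalled io.prometheus.write.v2.Request.
	body := []byte("...")

	req, err := http.NewRequest(http.MethodPost, "https://example.com/api/v1/write", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Add("Content-Encoding", "snappy")
	req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	req.Header.Set("User-Agent", "my-sender/0.1") // Prometheus itself sends "Prometheus/<version>".
	req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")

	fmt.Println(req.Header)
}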
@@ -265,12 +308,12 @@ func retryAfterDuration(t string) model.Duration {
 }
 
 // Name uniquely identifies the client.
-func (c Client) Name() string {
+func (c *Client) Name() string {
 	return c.remoteName
 }
 
 // Endpoint is the remote read or write endpoint.
-func (c Client) Endpoint() string {
+func (c *Client) Endpoint() string {
 	return c.urlString
 }
 
|
@ -22,7 +22,6 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"slices"
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/gogo/protobuf/proto"
|
"github.com/gogo/protobuf/proto"
|
||||||
|
@@ -30,10 +29,10 @@ import (
 	"github.com/prometheus/common/model"
 	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
@@ -153,10 +152,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
 			})
 		case chunkenc.ValHistogram:
 			ts, h := iter.AtHistogram(nil)
-			histograms = append(histograms, HistogramToHistogramProto(ts, h))
+			histograms = append(histograms, prompb.FromIntHistogram(ts, h))
 		case chunkenc.ValFloatHistogram:
 			ts, fh := iter.AtFloatHistogram(nil)
-			histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
+			histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
 		default:
 			return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
 		}
|
@ -166,7 +165,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
|
||||||
}
|
}
|
||||||
|
|
||||||
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
|
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
|
||||||
Labels: LabelsToLabelsProto(series.Labels(), nil),
|
Labels: prompb.FromLabels(series.Labels(), nil),
|
||||||
Samples: samples,
|
Samples: samples,
|
||||||
Histograms: histograms,
|
Histograms: histograms,
|
||||||
})
|
})
|
||||||
|
@@ -182,7 +181,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
 		if err := validateLabelsAndMetricName(ts.Labels); err != nil {
 			return errSeriesSet{err: err}
 		}
-		lbls := LabelProtosToLabels(&b, ts.Labels)
+		lbls := ts.ToLabels(&b, nil)
 		series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
 	}
 
@@ -235,7 +234,7 @@ func StreamChunkedReadResponses(
 	for ss.Next() {
 		series := ss.At()
 		iter = series.Iterator(iter)
-		lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
+		lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
 
 		maxDataLength := maxBytesInFrame
 		for _, lbl := range lbls {
@@ -481,21 +480,16 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
 		panic("iterator is not on an integer histogram sample")
 	}
 	h := c.series.histograms[c.histogramsCur]
-	return h.Timestamp, HistogramProtoToHistogram(h)
+	return h.Timestamp, h.ToIntHistogram()
 }
 
 // AtFloatHistogram implements chunkenc.Iterator.
 func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
-	switch c.curValType {
-	case chunkenc.ValHistogram:
-		fh := c.series.histograms[c.histogramsCur]
-		return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
-	case chunkenc.ValFloatHistogram:
-		fh := c.series.histograms[c.histogramsCur]
-		return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
-	default:
-		panic("iterator is not on a histogram sample")
-	}
+	if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
+		fh := c.series.histograms[c.histogramsCur]
+		return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
+	}
+	panic("iterator is not on a histogram sample")
 }
 
 // AtT implements chunkenc.Iterator.
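The simplification above leans on ToFloatHistogram also accepting the integer flavour and converting it; a small hedged sketch of that behaviour, using the helpers added in this branch with made-up values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	ih := &histogram.Histogram{Schema: 0, Count: 3, Sum: 6, ZeroThreshold: 1e-128}

	p := prompb.FromIntHistogram(1000, ih) // Integer histogram proto.
	fmt.Println(p.IsFloatHistogram())      // false

	fh := p.ToFloatHistogram() // Integer counts are converted to the float representation.
	fmt.Println(fh.Count, fh.Sum)
}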
@@ -618,141 +612,6 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
 	return result, nil
 }
 
-func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemplar.Exemplar {
-	timestamp := ep.Timestamp
-
-	return exemplar.Exemplar{
-		Labels: LabelProtosToLabels(b, ep.Labels),
-		Value:  ep.Value,
-		Ts:     timestamp,
-		HasTs:  timestamp != 0,
-	}
-}
-
-// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
-// provided proto message. The caller has to make sure that the proto message
-// represents an integer histogram and not a float histogram, or it panics.
-func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
-	if hp.IsFloatHistogram() {
-		panic("HistogramProtoToHistogram called with a float histogram")
-	}
-	return &histogram.Histogram{
-		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
-		Schema:           hp.Schema,
-		ZeroThreshold:    hp.ZeroThreshold,
-		ZeroCount:        hp.GetZeroCountInt(),
-		Count:            hp.GetCountInt(),
-		Sum:              hp.Sum,
-		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
-		PositiveBuckets:  hp.GetPositiveDeltas(),
-		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
-		NegativeBuckets:  hp.GetNegativeDeltas(),
-	}
-}
-
-// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
-// provided proto message to a Float Histogram. The caller has to make sure that
-// the proto message represents a float histogram and not an integer histogram,
-// or it panics.
-func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
-	if !hp.IsFloatHistogram() {
-		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
-	}
-	return &histogram.FloatHistogram{
-		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
-		Schema:           hp.Schema,
-		ZeroThreshold:    hp.ZeroThreshold,
-		ZeroCount:        hp.GetZeroCountFloat(),
-		Count:            hp.GetCountFloat(),
-		Sum:              hp.Sum,
-		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
-		PositiveBuckets:  hp.GetPositiveCounts(),
-		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
-		NegativeBuckets:  hp.GetNegativeCounts(),
-	}
-}
-
-// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
-// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
-// float histogram, or it panics.
-func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
-	if hp.IsFloatHistogram() {
-		panic("HistogramProtoToFloatHistogram called with a float histogram")
-	}
-	return &histogram.FloatHistogram{
-		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
-		Schema:           hp.Schema,
-		ZeroThreshold:    hp.ZeroThreshold,
-		ZeroCount:        float64(hp.GetZeroCountInt()),
-		Count:            float64(hp.GetCountInt()),
-		Sum:              hp.Sum,
-		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
-		PositiveBuckets:  deltasToCounts(hp.GetPositiveDeltas()),
-		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
-		NegativeBuckets:  deltasToCounts(hp.GetNegativeDeltas()),
-	}
-}
-
-func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
-	spans := make([]histogram.Span, len(s))
-	for i := 0; i < len(s); i++ {
-		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
-	}
-
-	return spans
-}
-
-func deltasToCounts(deltas []int64) []float64 {
-	counts := make([]float64, len(deltas))
-	var cur float64
-	for i, d := range deltas {
-		cur += float64(d)
-		counts[i] = cur
-	}
-	return counts
-}
-
-func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
-	return prompb.Histogram{
-		Count:          &prompb.Histogram_CountInt{CountInt: h.Count},
-		Sum:            h.Sum,
-		Schema:         h.Schema,
-		ZeroThreshold:  h.ZeroThreshold,
-		ZeroCount:      &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
-		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
-		NegativeDeltas: h.NegativeBuckets,
-		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
-		PositiveDeltas: h.PositiveBuckets,
-		ResetHint:      prompb.Histogram_ResetHint(h.CounterResetHint),
-		Timestamp:      timestamp,
-	}
-}
-
-func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
-	return prompb.Histogram{
-		Count:          &prompb.Histogram_CountFloat{CountFloat: fh.Count},
-		Sum:            fh.Sum,
-		Schema:         fh.Schema,
-		ZeroThreshold:  fh.ZeroThreshold,
-		ZeroCount:      &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
-		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
-		NegativeCounts: fh.NegativeBuckets,
-		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
-		PositiveCounts: fh.PositiveBuckets,
-		ResetHint:      prompb.Histogram_ResetHint(fh.CounterResetHint),
-		Timestamp:      timestamp,
-	}
-}
-
-func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
-	spans := make([]prompb.BucketSpan, len(s))
-	for i := 0; i < len(s); i++ {
-		spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
-	}
-
-	return spans
-}
-
 // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric.
 func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	metric := make(model.Metric, len(labelPairs))
@@ -762,43 +621,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	return metric
 }
 
-// LabelProtosToLabels transforms prompb labels into labels. The labels builder
-// will be used to build the returned labels.
-func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
-	b.Reset()
-	for _, l := range labelPairs {
-		b.Add(l.Name, l.Value)
-	}
-	b.Sort()
-	return b.Labels()
-}
-
-// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice
-// will be used to avoid allocations if it is big enough to store the labels.
-func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
-	result := buf[:0]
-	lbls.Range(func(l labels.Label) {
-		result = append(result, prompb.Label{
-			Name:  l.Name,
-			Value: l.Value,
-		})
-	})
-	return result
-}
-
-// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
-func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType {
-	mt := strings.ToUpper(string(t))
-	v, ok := prompb.MetricMetadata_MetricType_value[mt]
-	if !ok {
-		return prompb.MetricMetadata_UNKNOWN
-	}
-
-	return prompb.MetricMetadata_MetricType(v)
-}
-
 // DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
 // snappy decompression.
+// Used also by documentation/examples/remote_storage.
 func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
 	compressed, err := io.ReadAll(r)
 	if err != nil {
@@ -818,6 +643,28 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
 	return &req, nil
 }
 
+// DecodeWriteV2Request from an io.Reader into a writev2.Request, handling
+// snappy decompression.
+// Used also by documentation/examples/remote_storage.
+func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
+	compressed, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	reqBuf, err := snappy.Decode(nil, compressed)
+	if err != nil {
+		return nil, err
+	}
+
+	var req writev2.Request
+	if err := proto.Unmarshal(reqBuf, &req); err != nil {
+		return nil, err
+	}
+
+	return &req, nil
+}
+
 func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
 	contentType := r.Header.Get("Content-Type")
 	var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)
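A hedged sketch of how a receiver might use the new helper in an HTTP handler (wiring and error handling simplified; it assumes the storage/remote package exports DecodeWriteV2Request as added above):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	http.HandleFunc("/api/v1/write", func(w http.ResponseWriter, r *http.Request) {
		// Decode the snappy-compressed io.prometheus.write.v2.Request body.
		req, err := remote.DecodeWriteV2Request(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		b := labels.NewScratchBuilder(0)
		for _, ts := range req.Timeseries {
			// Resolve label references against the request-level symbols array.
			lbls := ts.ToLabels(&b, req.Symbols)
			log.Println(lbls.String(), len(ts.Samples), "samples")
		}
		w.WriteHeader(http.StatusNoContent)
	})
	log.Fatal(http.ListenAndServe(":9090", nil))
}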
@@ -19,13 +19,16 @@ import (
 	"sync"
 	"testing"
 
+	"github.com/go-kit/log"
 	"github.com/gogo/protobuf/proto"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
@ -57,7 +60,7 @@ var writeRequestFixture = &prompb.WriteRequest{
|
||||||
},
|
},
|
||||||
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
|
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
|
||||||
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
|
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
|
||||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
|
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Labels: []prompb.Label{
|
Labels: []prompb.Label{
|
||||||
|
@ -69,11 +72,59 @@ var writeRequestFixture = &prompb.WriteRequest{
|
||||||
},
|
},
|
||||||
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
|
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
|
||||||
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
|
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
|
||||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))},
|
Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
writeV2RequestSeries1Metadata = metadata.Metadata{
|
||||||
|
Type: model.MetricTypeGauge,
|
||||||
|
Help: "Test gauge for test purposes",
|
||||||
|
Unit: "Maybe op/sec who knows (:",
|
||||||
|
}
|
||||||
|
writeV2RequestSeries2Metadata = metadata.Metadata{
|
||||||
|
Type: model.MetricTypeCounter,
|
||||||
|
Help: "Test counter for test purposes",
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation.
|
||||||
|
writeV2RequestFixture = func() *writev2.Request {
|
||||||
|
st := writev2.NewSymbolTable()
|
||||||
|
b := labels.NewScratchBuilder(0)
|
||||||
|
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
|
||||||
|
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
|
||||||
|
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
|
||||||
|
return &writev2.Request{
|
||||||
|
Timeseries: []writev2.TimeSeries{
|
||||||
|
{
|
||||||
|
LabelsRefs: labelRefs,
|
||||||
|
Metadata: writev2.Metadata{
|
||||||
|
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2.
|
||||||
|
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
|
||||||
|
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
|
||||||
|
},
|
||||||
|
Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
|
||||||
|
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
|
||||||
|
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
LabelsRefs: labelRefs,
|
||||||
|
Metadata: writev2.Metadata{
|
||||||
|
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
|
||||||
|
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
|
||||||
|
// No unit.
|
||||||
|
},
|
||||||
|
Samples: []writev2.Sample{{Value: 2, Timestamp: 1}},
|
||||||
|
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}},
|
||||||
|
Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Symbols: st.Symbols(),
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
)
|
||||||
|
|
||||||
func TestValidateLabelsAndMetricName(t *testing.T) {
|
func TestValidateLabelsAndMetricName(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input []prompb.Label
|
input []prompb.Label
|
||||||
|
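The fixture above also works as a usage example for the new symbol table. A minimal, self-contained sketch of hand-building a v2 request with the same calls; the metric name, labels, and help text here are invented.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/labels"
    	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
    )

    func main() {
    	st := writev2.NewSymbolTable()
    	lbls := labels.FromStrings("__name__", "demo_requests_total", "job", "demo")
    	req := &writev2.Request{
    		Timeseries: []writev2.TimeSeries{{
    			// Label strings are interned once and referenced by index pairs.
    			LabelsRefs: st.SymbolizeLabels(lbls, nil),
    			Metadata: writev2.Metadata{
    				Type:    writev2.Metadata_METRIC_TYPE_COUNTER,
    				HelpRef: st.Symbolize("Demo counter, for illustration only."),
    			},
    			Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
    		}},
    		// The deduplicated string table travels with the request.
    		Symbols: st.Symbols(),
    	}
    	fmt.Printf("%d series, %d symbols\n", len(req.Timeseries), len(req.Symbols))
    }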
@@ -268,7 +319,7 @@ func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) {
 		} else {
 			ts = int64(i)
 		}
-		histProtos[i] = HistogramToHistogramProto(ts, h)
+		histProtos[i] = prompb.FromIntHistogram(ts, h)
 	}
 	series := &concreteSeries{
 		labels: labels.FromStrings("foo", "bar"),
@@ -319,9 +370,9 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
 	histProtos := make([]prompb.Histogram, len(histograms))
 	for i, h := range histograms {
 		if i < 10 {
-			histProtos[i] = HistogramToHistogramProto(int64(i+1), h)
+			histProtos[i] = prompb.FromIntHistogram(int64(i+1), h)
 		} else {
-			histProtos[i] = HistogramToHistogramProto(int64(i+6), h)
+			histProtos[i] = prompb.FromIntHistogram(int64(i+6), h)
 		}
 	}
 	series := &concreteSeries{
@@ -401,7 +452,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
 	require.Equal(t, chunkenc.ValHistogram, it.Next())
 	ts, fh = it.AtFloatHistogram(nil)
 	require.Equal(t, int64(17), ts)
-	expected := HistogramProtoToFloatHistogram(HistogramToHistogramProto(int64(17), histograms[11]))
+	expected := prompb.FromIntHistogram(int64(17), histograms[11]).ToFloatHistogram()
 	require.Equal(t, expected, fh)
 
 	// Keep calling Next() until the end.
@@ -485,39 +536,8 @@ func TestMergeLabels(t *testing.T) {
 	}
 }
 
-func TestMetricTypeToMetricTypeProto(t *testing.T) {
-	tc := []struct {
-		desc string
-		input model.MetricType
-		expected prompb.MetricMetadata_MetricType
-	}{
-		{
-			desc: "with a single-word metric",
-			input: model.MetricTypeCounter,
-			expected: prompb.MetricMetadata_COUNTER,
-		},
-		{
-			desc: "with a two-word metric",
-			input: model.MetricTypeStateset,
-			expected: prompb.MetricMetadata_STATESET,
-		},
-		{
-			desc: "with an unknown metric",
-			input: "not-known",
-			expected: prompb.MetricMetadata_UNKNOWN,
-		},
-	}
-
-	for _, tt := range tc {
-		t.Run(tt.desc, func(t *testing.T) {
-			m := metricTypeToMetricTypeProto(tt.input)
-			require.Equal(t, tt.expected, m)
-		})
-	}
-}
-
 func TestDecodeWriteRequest(t *testing.T) {
-	buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+	buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
 	require.NoError(t, err)
 
 	actual, err := DecodeWriteRequest(bytes.NewReader(buf))
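The renames above move the histogram converters onto the prompb package. A small sketch, using only the helper calls visible in this diff, of converting a native histogram to its protobuf form and then to a float histogram; the histogram values are made up.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/histogram"
    	"github.com/prometheus/prometheus/prompb"
    )

    func main() {
    	h := &histogram.Histogram{
    		Schema:          0,
    		Count:           3,
    		Sum:             5,
    		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
    		PositiveBuckets: []int64{3}, // delta-encoded bucket counts
    	}
    	// prompb.FromIntHistogram replaces the old HistogramToHistogramProto helper.
    	p := prompb.FromIntHistogram(1337, h)
    	// ToFloatHistogram is the same conversion the updated test relies on.
    	fh := p.ToFloatHistogram()
    	fmt.Println(fh.Count, fh.Sum)
    }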
@@ -525,212 +545,18 @@ func TestDecodeWriteRequest(t *testing.T) {
 	require.Equal(t, writeRequestFixture, actual)
 }
 
-func TestNilHistogramProto(*testing.T) {
-	// This function will panic if it impromperly handles nil
-	// values, causing the test to fail.
-	HistogramProtoToHistogram(prompb.Histogram{})
-	HistogramProtoToFloatHistogram(prompb.Histogram{})
-}
-
-func exampleHistogram() histogram.Histogram {
-	return histogram.Histogram{
-		CounterResetHint: histogram.GaugeType,
-		Schema: 0,
-		Count: 19,
-		Sum: 2.7,
-		PositiveSpans: []histogram.Span{
-			{Offset: 0, Length: 4},
-			{Offset: 0, Length: 0},
-			{Offset: 0, Length: 3},
-		},
-		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
-		NegativeSpans: []histogram.Span{
-			{Offset: 0, Length: 5},
-			{Offset: 1, Length: 0},
-			{Offset: 0, Length: 1},
-		},
-		NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
-	}
-}
-
-func exampleHistogramProto() prompb.Histogram {
-	return prompb.Histogram{
-		Count: &prompb.Histogram_CountInt{CountInt: 19},
-		Sum: 2.7,
-		Schema: 0,
-		ZeroThreshold: 0,
-		ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
-		NegativeSpans: []prompb.BucketSpan{
-			{Offset: 0, Length: 5},
-			{Offset: 1, Length: 0},
-			{Offset: 0, Length: 1},
-		},
-		NegativeDeltas: []int64{1, 2, -2, 1, -1, 0},
-		PositiveSpans: []prompb.BucketSpan{
-			{Offset: 0, Length: 4},
-			{Offset: 0, Length: 0},
-			{Offset: 0, Length: 3},
-		},
-		PositiveDeltas: []int64{1, 2, -2, 1, -1, 0, 0},
-		ResetHint: prompb.Histogram_GAUGE,
-		Timestamp: 1337,
-	}
-}
-
-func TestHistogramToProtoConvert(t *testing.T) {
-	tests := []struct {
-		input histogram.CounterResetHint
-		expected prompb.Histogram_ResetHint
-	}{
-		{input: histogram.UnknownCounterReset, expected: prompb.Histogram_UNKNOWN},
-		{input: histogram.CounterReset, expected: prompb.Histogram_YES},
-		{input: histogram.NotCounterReset, expected: prompb.Histogram_NO},
-		{input: histogram.GaugeType, expected: prompb.Histogram_GAUGE},
-	}
-
-	for _, test := range tests {
-		h := exampleHistogram()
-		h.CounterResetHint = test.input
-		p := exampleHistogramProto()
-		p.ResetHint = test.expected
-
-		require.Equal(t, p, HistogramToHistogramProto(1337, &h))
-
-		require.Equal(t, h, *HistogramProtoToHistogram(p))
-	}
-}
-
-func exampleFloatHistogram() histogram.FloatHistogram {
-	return histogram.FloatHistogram{
-		CounterResetHint: histogram.GaugeType,
-		Schema: 0,
-		Count: 19,
-		Sum: 2.7,
-		PositiveSpans: []histogram.Span{
-			{Offset: 0, Length: 4},
-			{Offset: 0, Length: 0},
-			{Offset: 0, Length: 3},
-		},
-		PositiveBuckets: []float64{1, 2, -2, 1, -1, 0, 0},
-		NegativeSpans: []histogram.Span{
-			{Offset: 0, Length: 5},
-			{Offset: 1, Length: 0},
-			{Offset: 0, Length: 1},
-		},
-		NegativeBuckets: []float64{1, 2, -2, 1, -1, 0},
-	}
-}
-
-func exampleFloatHistogramProto() prompb.Histogram {
-	return prompb.Histogram{
-		Count: &prompb.Histogram_CountFloat{CountFloat: 19},
-		Sum: 2.7,
-		Schema: 0,
-		ZeroThreshold: 0,
-		ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0},
-		NegativeSpans: []prompb.BucketSpan{
-			{Offset: 0, Length: 5},
-			{Offset: 1, Length: 0},
-			{Offset: 0, Length: 1},
-		},
-		NegativeCounts: []float64{1, 2, -2, 1, -1, 0},
-		PositiveSpans: []prompb.BucketSpan{
-			{Offset: 0, Length: 4},
-			{Offset: 0, Length: 0},
-			{Offset: 0, Length: 3},
-		},
-		PositiveCounts: []float64{1, 2, -2, 1, -1, 0, 0},
-		ResetHint: prompb.Histogram_GAUGE,
-		Timestamp: 1337,
-	}
-}
-
-func TestFloatHistogramToProtoConvert(t *testing.T) {
-	tests := []struct {
-		input histogram.CounterResetHint
-		expected prompb.Histogram_ResetHint
-	}{
-		{input: histogram.UnknownCounterReset, expected: prompb.Histogram_UNKNOWN},
-		{input: histogram.CounterReset, expected: prompb.Histogram_YES},
-		{input: histogram.NotCounterReset, expected: prompb.Histogram_NO},
-		{input: histogram.GaugeType, expected: prompb.Histogram_GAUGE},
-	}
-
-	for _, test := range tests {
-		h := exampleFloatHistogram()
-		h.CounterResetHint = test.input
-		p := exampleFloatHistogramProto()
-		p.ResetHint = test.expected
-
-		require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h))
-
-		require.Equal(t, h, *FloatHistogramProtoToFloatHistogram(p))
-	}
-}
+func TestDecodeWriteV2Request(t *testing.T) {
+	buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+	require.NoError(t, err)
+
+	actual, err := DecodeWriteV2Request(bytes.NewReader(buf))
+	require.NoError(t, err)
+	require.Equal(t, writeV2RequestFixture, actual)
 }
 
 func TestStreamResponse(t *testing.T) {
-	lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
-	lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
+	lbs1 := prompb.FromLabels(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
+	lbs2 := prompb.FromLabels(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
 	chunk := prompb.Chunk{
 		Type: prompb.Chunk_XOR,
 		Data: make([]byte, 100),
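Label conversion likewise moves to prompb.FromLabels and the ToLabels methods on the generated types. A minimal round-trip sketch based on those calls; the label values are invented.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/labels"
    	"github.com/prometheus/prometheus/prompb"
    )

    func main() {
    	lbls := labels.FromStrings("instance", "localhost1", "job", "demo1")
    	// FromLabels replaces LabelsToLabelsProto.
    	ts := prompb.TimeSeries{Labels: prompb.FromLabels(lbls, nil)}

    	// ToLabels replaces LabelProtosToLabels and reuses a scratch builder.
    	b := labels.NewScratchBuilder(0)
    	back := ts.ToLabels(&b, nil)
    	fmt.Println(back.String())
    }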
@@ -802,7 +628,7 @@ func (c *mockChunkSeriesSet) Next() bool {
 
 func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
 	return &storage.ChunkSeriesEntry{
-		Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels),
+		Lset: c.chunkedSeries[c.index].ToLabels(&c.builder, nil),
 		ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
 			return &mockChunkIterator{
 				chunks: c.chunkedSeries[c.index].Chunks,
@@ -27,7 +27,7 @@ import (
 
 // MetadataAppender is an interface used by the Metadata Watcher to send metadata, It is read from the scrape manager, on to somewhere else.
 type MetadataAppender interface {
-	AppendMetadata(context.Context, []scrape.MetricMetadata)
+	AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
 }
 
 // Watchable represents from where we fetch active targets for metadata.
@@ -146,7 +146,7 @@ func (mw *MetadataWatcher) collect() {
 	}
 
 	// Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
-	mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata)
+	mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
 }
 
 func (mw *MetadataWatcher) ready() bool {
@@ -57,7 +57,7 @@ type writeMetadataToMock struct {
 	metadataAppended int
 }
 
-func (mwtm *writeMetadataToMock) AppendMetadata(_ context.Context, m []scrape.MetricMetadata) {
+func (mwtm *writeMetadataToMock) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
 	mwtm.metadataAppended += len(m)
 }
 
@@ -29,7 +29,6 @@ import (
 // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
 // OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
 var unitMap = map[string]string{
-
 	// Time
 	"d": "days",
 	"h": "hours",
@@ -111,7 +110,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
 
 // Build a normalized name for the specified metric
 func normalizeName(metric pmetric.Metric, namespace string) string {
-
 	// Split metric name in "tokens" (remove all non-alphanumeric)
 	nameTokens := strings.FieldsFunc(
 		metric.Name(),
@@ -19,7 +19,6 @@ package prometheus
 import "strings"
 
 var wordToUCUM = map[string]string{
-
 	// Time
 	"days": "d",
 	"hours": "h",
@@ -36,9 +36,11 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/record"
@@ -389,7 +391,7 @@ func (m *queueManagerMetrics) unregister() {
 // external timeseries database.
 type WriteClient interface {
 	// Store stores the given samples in the remote storage.
-	Store(context.Context, []byte, int) error
+	Store(ctx context.Context, req []byte, retryAttempt int) error
 	// Name uniquely identifies the remote storage.
 	Name() string
 	// Endpoint is the remote read or write endpoint for the storage client.
@@ -418,11 +420,14 @@ type QueueManager struct {
 
 	clientMtx   sync.RWMutex
 	storeClient WriteClient
+	protoMsg    config.RemoteWriteProtoMsg
+	enc         Compression
 
-	seriesMtx      sync.Mutex // Covers seriesLabels, droppedSeries and builder.
+	seriesMtx      sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
 	seriesLabels   map[chunks.HeadSeriesRef]labels.Labels
+	seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata
 	droppedSeries  map[chunks.HeadSeriesRef]struct{}
 	builder        *labels.Builder
 
 	seriesSegmentMtx     sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first.
 	seriesSegmentIndexes map[chunks.HeadSeriesRef]int
@@ -463,6 +468,7 @@ func NewQueueManager(
 	sm ReadyScrapeManager,
 	enableExemplarRemoteWrite bool,
 	enableNativeHistogramRemoteWrite bool,
+	protoMsg config.RemoteWriteProtoMsg,
 ) *QueueManager {
 	if logger == nil {
 		logger = log.NewNopLogger()
@@ -487,6 +493,7 @@ func NewQueueManager(
 		sendNativeHistograms: enableNativeHistogramRemoteWrite,
 
 		seriesLabels:         make(map[chunks.HeadSeriesRef]labels.Labels),
+		seriesMetadata:       make(map[chunks.HeadSeriesRef]*metadata.Metadata),
 		seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
 		droppedSeries:        make(map[chunks.HeadSeriesRef]struct{}),
 		builder:              labels.NewBuilder(labels.EmptyLabels()),
@@ -503,9 +510,26 @@ func NewQueueManager(
 		metrics:              metrics,
 		interner:             interner,
 		highestRecvTimestamp: highestRecvTimestamp,
+
+		protoMsg: protoMsg,
+		enc:      SnappyBlockCompression, // Hardcoded for now, but scaffolding exists for likely future use.
+	}
+
+	walMetadata := false
+	if t.protoMsg != config.RemoteWriteProtoMsgV1 {
+		walMetadata = true
+	}
+	t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
+
+	// The current MetadataWatcher implementation is mutually exclusive
+	// with the new approach, which stores metadata as WAL records and
+	// ships them alongside series. If both mechanisms are set, the new one
+	// takes precedence by implicitly disabling the older one.
+	if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 {
+		level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request")
+		t.mcfg.Send = false
 	}
 
-	t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
 	if t.mcfg.Send {
 		t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
 	}
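The Store signature above now names its arguments, which makes the retry attempt explicit. Below is a hypothetical, do-nothing client with the same Store/Name/Endpoint shape; it is not an implementation shipped by this change.

    package main

    import (
    	"context"
    	"log"
    )

    // loggingClient is a stand-in with the WriteClient method set shown above;
    // it only logs what it would send.
    type loggingClient struct{ url string }

    func (c *loggingClient) Store(ctx context.Context, req []byte, retryAttempt int) error {
    	log.Printf("would POST %d compressed bytes to %s (attempt %d)", len(req), c.url, retryAttempt)
    	return nil
    }

    func (c *loggingClient) Name() string     { return "logging" }
    func (c *loggingClient) Endpoint() string { return c.url }

    func main() {
    	c := &loggingClient{url: "http://localhost:1234/api/v1/write"}
    	_ = c.Store(context.Background(), nil, 0)
    }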
@@ -514,14 +538,21 @@ func NewQueueManager(
 	return t
 }
 
-// AppendMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
-func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
+// AppendWatcherMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
+// This is only used for the metadata_config.send setting and 1.x Remote Write.
+func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
+	// no op for any newer proto format, which will cache metadata sent to it from the WAL watcher.
+	if t.protoMsg != config.RemoteWriteProtoMsgV1 {
+		return
+	}
+
+	// 1.X will still get metadata in batches.
 	mm := make([]prompb.MetricMetadata, 0, len(metadata))
 	for _, entry := range metadata {
 		mm = append(mm, prompb.MetricMetadata{
 			MetricFamilyName: entry.Metric,
 			Help:             entry.Help,
-			Type:             metricTypeToMetricTypeProto(entry.Type),
+			Type:             prompb.FromMetadataType(entry.Type),
 			Unit:             entry.Unit,
 		})
 	}
@@ -542,8 +573,8 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met
 }
 
 func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error {
-	// Build the WriteRequest with no samples.
-	req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil)
+	// Build the WriteRequest with no samples (v1 flow).
+	req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.enc)
 	if err != nil {
 		return err
 	}
@@ -629,6 +660,36 @@ func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sam
 	}
 }
 
+func isV2TimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts writev2.TimeSeries) bool {
+	return func(ts writev2.TimeSeries) bool {
+		if sampleAgeLimit == 0 {
+			// If sampleAgeLimit is unset, then we never skip samples due to their age.
+			return false
+		}
+		switch {
+		// Only the first element should be set in the series, therefore we only check the first element.
+		case len(ts.Samples) > 0:
+			if isSampleOld(baseTime, sampleAgeLimit, ts.Samples[0].Timestamp) {
+				metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc()
+				return true
+			}
+		case len(ts.Histograms) > 0:
+			if isSampleOld(baseTime, sampleAgeLimit, ts.Histograms[0].Timestamp) {
+				metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
+				return true
+			}
+		case len(ts.Exemplars) > 0:
+			if isSampleOld(baseTime, sampleAgeLimit, ts.Exemplars[0].Timestamp) {
+				metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc()
+				return true
+			}
+		default:
+			return false
+		}
+		return false
+	}
+}
+
 // Append queues a sample to be sent to the remote storage. Blocks until all samples are
 // enqueued on their shards or a shutdown signal is received.
 func (t *QueueManager) Append(samples []record.RefSample) bool {
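isV2TimeSeriesOldFilter mirrors the existing v1 filter: only the first element of each slice is checked, and a zero sampleAgeLimit disables the check entirely. A simplified, stand-alone version of that age test, without the drop counters, is sketched below.

    package main

    import (
    	"fmt"
    	"time"

    	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
    )

    // tooOld is a simplified version of the age check used by the filter above.
    func tooOld(baseTime time.Time, limit time.Duration, ts writev2.TimeSeries) bool {
    	if limit == 0 || len(ts.Samples) == 0 {
    		return false
    	}
    	return baseTime.Sub(time.UnixMilli(ts.Samples[0].Timestamp)) > limit
    }

    func main() {
    	stale := writev2.TimeSeries{
    		Samples: []writev2.Sample{{Timestamp: time.Now().Add(-2 * time.Hour).UnixMilli()}},
    	}
    	fmt.Println(tooOld(time.Now(), time.Hour, stale)) // true
    }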
@@ -652,6 +713,9 @@ outer:
 			t.seriesMtx.Unlock()
 			continue
 		}
+		// TODO(cstyan): Handle or at least log an error if no metadata is found.
+		// See https://github.com/prometheus/prometheus/issues/14405
+		meta := t.seriesMetadata[s.Ref]
 		t.seriesMtx.Unlock()
 		// Start with a very small backoff. This should not be t.cfg.MinBackoff
 		// as it can happen without errors, and we want to pickup work after
@@ -666,6 +730,7 @@ outer:
 			}
 			if t.shards.enqueue(s.Ref, timeSeries{
 				seriesLabels: lbls,
+				metadata:     meta,
 				timestamp:    s.T,
 				value:        s.V,
 				sType:        tSample,
@@ -711,6 +776,7 @@ outer:
 			t.seriesMtx.Unlock()
 			continue
 		}
+		meta := t.seriesMetadata[e.Ref]
 		t.seriesMtx.Unlock()
 		// This will only loop if the queues are being resharded.
 		backoff := t.cfg.MinBackoff
@@ -722,6 +788,7 @@ outer:
 			}
 			if t.shards.enqueue(e.Ref, timeSeries{
 				seriesLabels:   lbls,
+				metadata:       meta,
 				timestamp:      e.T,
 				value:          e.V,
 				exemplarLabels: e.Labels,
@@ -765,6 +832,7 @@ outer:
 			t.seriesMtx.Unlock()
 			continue
 		}
+		meta := t.seriesMetadata[h.Ref]
 		t.seriesMtx.Unlock()
 
 		backoff := model.Duration(5 * time.Millisecond)
@@ -776,6 +844,7 @@ outer:
 			}
 			if t.shards.enqueue(h.Ref, timeSeries{
 				seriesLabels: lbls,
+				metadata:     meta,
 				timestamp:    h.T,
 				histogram:    h.H,
 				sType:        tHistogram,
@@ -818,6 +887,7 @@ outer:
 			t.seriesMtx.Unlock()
 			continue
 		}
+		meta := t.seriesMetadata[h.Ref]
 		t.seriesMtx.Unlock()
 
 		backoff := model.Duration(5 * time.Millisecond)
@@ -829,6 +899,7 @@ outer:
 			}
 			if t.shards.enqueue(h.Ref, timeSeries{
 				seriesLabels:   lbls,
+				metadata:       meta,
 				timestamp:      h.T,
 				floatHistogram: h.FH,
 				sType:          tFloatHistogram,
@@ -925,6 +996,23 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
 	}
 }
 
+// StoreMetadata keeps track of known series' metadata for lookups when sending samples to remote.
+func (t *QueueManager) StoreMetadata(meta []record.RefMetadata) {
+	if t.protoMsg == config.RemoteWriteProtoMsgV1 {
+		return
+	}
+
+	t.seriesMtx.Lock()
+	defer t.seriesMtx.Unlock()
+	for _, m := range meta {
+		t.seriesMetadata[m.Ref] = &metadata.Metadata{
+			Type: record.ToMetricType(m.Type),
+			Unit: m.Unit,
+			Help: m.Help,
+		}
+	}
+}
+
 // UpdateSeriesSegment updates the segment number held against the series,
 // so we can trim older ones in SeriesReset.
 func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) {
@@ -950,6 +1038,7 @@ func (t *QueueManager) SeriesReset(index int) {
 			delete(t.seriesSegmentIndexes, k)
 			t.releaseLabels(t.seriesLabels[k])
 			delete(t.seriesLabels, k)
+			delete(t.seriesMetadata, k)
 			delete(t.droppedSeries, k)
 		}
 	}
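StoreMetadata fills a per-series metadata cache from WAL metadata records so that v2 requests can attach type, unit, and help to every series. A toy version of that cache fill is sketched below; the record values are invented and the type field is left at its zero value to avoid assuming constant names.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/model/metadata"
    	"github.com/prometheus/prometheus/tsdb/chunks"
    	"github.com/prometheus/prometheus/tsdb/record"
    )

    func main() {
    	cache := map[chunks.HeadSeriesRef]*metadata.Metadata{}
    	recs := []record.RefMetadata{{Ref: 1, Unit: "seconds", Help: "Example help text."}}
    	for _, m := range recs {
    		cache[m.Ref] = &metadata.Metadata{
    			Type: record.ToMetricType(m.Type), // same mapping StoreMetadata uses
    			Unit: m.Unit,
    			Help: m.Help,
    		}
    	}
    	fmt.Println(cache[1].Unit, cache[1].Help)
    }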
@@ -1165,6 +1254,7 @@ type shards struct {
 	samplesDroppedOnHardShutdown    atomic.Uint32
 	exemplarsDroppedOnHardShutdown  atomic.Uint32
 	histogramsDroppedOnHardShutdown atomic.Uint32
+	metadataDroppedOnHardShutdown   atomic.Uint32
 }
 
 // start the shards; must be called before any call to enqueue.
@@ -1193,6 +1283,7 @@ func (s *shards) start(n int) {
 	s.samplesDroppedOnHardShutdown.Store(0)
 	s.exemplarsDroppedOnHardShutdown.Store(0)
 	s.histogramsDroppedOnHardShutdown.Store(0)
+	s.metadataDroppedOnHardShutdown.Store(0)
 	for i := 0; i < n; i++ {
 		go s.runShard(hardShutdownCtx, i, newQueues[i])
 	}
@@ -1245,7 +1336,6 @@ func (s *shards) stop() {
 func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()
-
 	shard := uint64(ref) % uint64(len(s.queues))
 	select {
 	case <-s.softShutdown:
@@ -1288,6 +1378,7 @@ type timeSeries struct {
 	value          float64
 	histogram      *histogram.Histogram
 	floatHistogram *histogram.FloatHistogram
+	metadata       *metadata.Metadata
 	timestamp      int64
 	exemplarLabels labels.Labels
 	// The type of series: sample, exemplar, or histogram.
@@ -1301,6 +1392,7 @@ const (
 	tExemplar
 	tHistogram
 	tFloatHistogram
+	tMetadata
 )
 
 func newQueue(batchSize, capacity int) *queue {
@@ -1324,6 +1416,10 @@ func newQueue(batchSize, capacity int) *queue {
 func (q *queue) Append(datum timeSeries) bool {
 	q.batchMtx.Lock()
 	defer q.batchMtx.Unlock()
+	// TODO(cstyan): Check if metadata now means we've reduced the total # of samples
+	// we can batch together here, and if so find a way to not include metadata
+	// in the batch size calculation.
+	// See https://github.com/prometheus/prometheus/issues/14405
 	q.batch = append(q.batch, datum)
 	if len(q.batch) == cap(q.batch) {
 		select {
@@ -1347,7 +1443,6 @@ func (q *queue) Chan() <-chan []timeSeries {
 func (q *queue) Batch() []timeSeries {
 	q.batchMtx.Lock()
 	defer q.batchMtx.Unlock()
-
 	select {
 	case batch := <-q.batchQueue:
 		return batch
@@ -1419,19 +1514,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 	}()
 
 	shardNum := strconv.Itoa(shardID)
+	symbolTable := writev2.NewSymbolTable()
 
 	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
 	// If we have fewer samples than that, flush them out after a deadline anyways.
 	var (
 		max = s.qm.cfg.MaxSamplesPerSend
 
-		pBuf = proto.NewBuffer(nil)
-		buf  []byte
+		pBuf    = proto.NewBuffer(nil)
+		pBufRaw []byte
+		buf     []byte
 	)
+	// TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
 	if s.qm.sendExemplars {
 		max += int(float64(max) * 0.1)
 	}
 
+	// TODO: Dry all of this, we should make an interface/generic for the timeseries type.
 	batchQueue := queue.Chan()
 	pendingData := make([]prompb.TimeSeries, max)
 	for i := range pendingData {
@@ -1440,6 +1539,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 			pendingData[i].Exemplars = []prompb.Exemplar{{}}
 		}
 	}
+	pendingDataV2 := make([]writev2.TimeSeries, max)
+	for i := range pendingDataV2 {
+		pendingDataV2[i].Samples = []writev2.Sample{{}}
+	}
 
 	timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
 	stop := func() {
@@ -1452,6 +1555,24 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 	}
 	defer stop()
 
+	sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, enc Compression, timer bool) {
+		switch protoMsg {
+		case config.RemoteWriteProtoMsgV1:
+			nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+			n := nPendingSamples + nPendingExemplars + nPendingHistograms
+			if timer {
+				level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
+					"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
+			}
+			_ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc)
+		case config.RemoteWriteProtoMsgV2:
+			nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+			n := nPendingSamples + nPendingExemplars + nPendingHistograms
+			_ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, enc)
+			symbolTable.Reset()
+		}
+	}
+
 	for {
 		select {
 		case <-ctx.Done():
@@ -1475,10 +1596,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 			if !ok {
 				return
 			}
-			nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
+
+			sendBatch(batch, s.qm.protoMsg, s.qm.enc, false)
+			// TODO(bwplotka): Previously the return was between popular and send.
+			// Consider this when DRY-ing https://github.com/prometheus/prometheus/issues/14409
 			queue.ReturnForReuse(batch)
-			n := nPendingSamples + nPendingExemplars + nPendingHistograms
-			s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
 
 			stop()
 			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1486,11 +1608,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 		case <-timer.C:
 			batch := queue.Batch()
 			if len(batch) > 0 {
-				nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
-				n := nPendingSamples + nPendingExemplars + nPendingHistograms
-				level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
-					"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
-				s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
+				sendBatch(batch, s.qm.protoMsg, s.qm.enc, true)
 			}
 			queue.ReturnForReuse(batch)
 			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1498,21 +1616,22 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
 		}
 	}
 
-func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
+func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
 	var nPendingSamples, nPendingExemplars, nPendingHistograms int
 	for nPending, d := range batch {
 		pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
-		if s.qm.sendExemplars {
+		if sendExemplars {
 			pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
 		}
-		if s.qm.sendNativeHistograms {
+		if sendNativeHistograms {
 			pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
 		}
 
 		// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
 		// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
 		// stop reading from the queue. This makes it safe to reference pendingSamples by index.
-		pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+		pendingData[nPending].Labels = prompb.FromLabels(d.seriesLabels, pendingData[nPending].Labels)
 
 		switch d.sType {
 		case tSample:
 			pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
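sendBatch keys the encoding path off the configured protobuf message. A stripped-down illustration of that dispatch follows; the closures stand in for the real send paths and are not part of the change.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/config"
    )

    func dispatch(protoMsg config.RemoteWriteProtoMsg, sendV1, sendV2 func() error) error {
    	switch protoMsg {
    	case config.RemoteWriteProtoMsgV1:
    		return sendV1()
    	case config.RemoteWriteProtoMsgV2:
    		return sendV2()
    	default:
    		return fmt.Errorf("unsupported remote write proto message: %v", protoMsg)
    	}
    }

    func main() {
    	err := dispatch(config.RemoteWriteProtoMsgV2,
    		func() error { fmt.Println("v1 path"); return nil },
    		func() error { fmt.Println("v2 path"); return nil },
    	)
    	if err != nil {
    		fmt.Println(err)
    	}
    }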
@@ -1522,25 +1641,39 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
 			nPendingSamples++
 		case tExemplar:
 			pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
-				Labels:    LabelsToLabelsProto(d.exemplarLabels, nil),
+				Labels:    prompb.FromLabels(d.exemplarLabels, nil),
 				Value:     d.value,
 				Timestamp: d.timestamp,
 			})
 			nPendingExemplars++
 		case tHistogram:
-			pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
+			pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromIntHistogram(d.timestamp, d.histogram))
 			nPendingHistograms++
 		case tFloatHistogram:
-			pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
+			pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromFloatHistogram(d.timestamp, d.floatHistogram))
 			nPendingHistograms++
 		}
 	}
 	return nPendingSamples, nPendingExemplars, nPendingHistograms
 }
 
-func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
 	begin := time.Now()
-	err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
+	err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
+	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin))
+	return err
+}
+
+// TODO(bwplotka): DRY this (have one logic for both v1 and v2).
+// See https://github.com/prometheus/prometheus/issues/14409
+func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
+	begin := time.Now()
+	err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
+	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin))
+	return err
+}
+
+func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) {
 	if err != nil {
 		level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
 		s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
@@ -1550,8 +1683,8 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
 
 	// These counters are used to calculate the dynamic sharding, and as such
 	// should be maintained irrespective of success or failure.
-	s.qm.dataOut.incr(int64(len(samples)))
-	s.qm.dataOutDuration.incr(int64(time.Since(begin)))
+	s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
+	s.qm.dataOutDuration.incr(int64(duration))
 	s.qm.lastSendTimestamp.Store(time.Now().Unix())
 	// Pending samples/exemplars/histograms also should be subtracted, as an error means
 	// they will not be retried.
@@ -1564,9 +1697,9 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
 }
 
 // sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
 	// Build the WriteRequest with no metadata.
-	req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, *buf, nil)
+	req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
 	s.qm.buildRequestLimitTimestamp.Store(lowest)
 	if err != nil {
 		// Failing to build the write request is non-recoverable, since it will
@@ -1590,8 +1723,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
 				samples,
 				nil,
 				pBuf,
-				*buf,
+				buf,
 				isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
+				enc,
 			)
 			s.qm.buildRequestLimitTimestamp.Store(lowest)
 			if err != nil {
@@ -1622,6 +1756,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
 		s.qm.metrics.samplesTotal.Add(float64(sampleCount))
 		s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
 		s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
+		s.qm.metrics.metadataTotal.Add(float64(metadataCount))
 		err := s.qm.client().Store(ctx, *buf, try)
 		s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
 
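buildWriteRequest now takes the compression as a parameter; the default remains snappy block compression. The sketch below shows roughly the marshal-then-compress step it performs, using the gogo protobuf and golang/snappy packages referenced elsewhere in this change; it is an illustration, not the actual helper.

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    	"github.com/golang/snappy"
    	"github.com/prometheus/prometheus/prompb"
    )

    func main() {
    	req := &prompb.WriteRequest{Timeseries: []prompb.TimeSeries{{
    		Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
    	}}}
    	raw, err := proto.Marshal(req)
    	if err != nil {
    		panic(err)
    	}
    	// Snappy block compression, i.e. what the Compression/enc parameter selects by default.
    	compressed := snappy.Encode(nil, raw)
    	fmt.Printf("marshalled %d bytes, compressed to %d bytes\n", len(raw), len(compressed))
    }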
@ -1652,6 +1787,148 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
+// sendV2Samples to the remote storage with backoff for recoverable errors.
+func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
+  // Build the WriteRequest with no metadata.
+  req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
+  s.qm.buildRequestLimitTimestamp.Store(lowest)
+  if err != nil {
+    // Failing to build the write request is non-recoverable, since it will
+    // only error if marshaling the proto to bytes fails.
+    return err
+  }
+
+  reqSize := len(req)
+  *buf = req
+
+  // An anonymous function allows us to defer the completion of our per-try spans
+  // without causing a memory leak, and it has the nice effect of not propagating any
+  // parameters for sendSamplesWithBackoff/3.
+  attemptStore := func(try int) error {
+    currentTime := time.Now()
+    lowest := s.qm.buildRequestLimitTimestamp.Load()
+    if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) {
+      // This will filter out old samples during retries.
+      req, _, lowest, err := buildV2WriteRequest(
+        s.qm.logger,
+        samples,
+        labels,
+        pBuf,
+        buf,
+        isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
+        enc,
+      )
+      s.qm.buildRequestLimitTimestamp.Store(lowest)
+      if err != nil {
+        return err
+      }
+      *buf = req
+    }
+
+    ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
+    defer span.End()
+
+    span.SetAttributes(
+      attribute.Int("request_size", reqSize),
+      attribute.Int("samples", sampleCount),
+      attribute.Int("try", try),
+      attribute.String("remote_name", s.qm.storeClient.Name()),
+      attribute.String("remote_url", s.qm.storeClient.Endpoint()),
+    )
+
+    if exemplarCount > 0 {
+      span.SetAttributes(attribute.Int("exemplars", exemplarCount))
+    }
+    if histogramCount > 0 {
+      span.SetAttributes(attribute.Int("histograms", histogramCount))
+    }
+
+    begin := time.Now()
+    s.qm.metrics.samplesTotal.Add(float64(sampleCount))
+    s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
+    s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
+    s.qm.metrics.metadataTotal.Add(float64(metadataCount))
+    err := s.qm.client().Store(ctx, *buf, try)
+    s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
+
+    if err != nil {
+      span.RecordError(err)
+      return err
+    }
+
+    return nil
+  }
+
+  onRetry := func() {
+    s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
+    s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
+    s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
+  }
+
+  err = s.qm.sendWriteRequestWithBackoff(ctx, attemptStore, onRetry)
+  if errors.Is(err, context.Canceled) {
+    // When there is resharding, we cancel the context for this queue, which means the data is not sent.
+    // So we exit early to not update the metrics.
+    return err
+  }
+
+  s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
+  s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
+
+  return err
+}
+
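[Editor's note] The per-try tracing in sendV2SamplesWithBackoff above is worth calling out: the attempt body is a closure so that defer span.End() scopes to a single try. A minimal standalone sketch of that pattern, assuming the go.opentelemetry.io/otel packages already imported by this file; sendOnce and maxTries are hypothetical names, not part of the patch:

package sketch

import (
  "context"

  "go.opentelemetry.io/otel"
  "go.opentelemetry.io/otel/attribute"
)

// sendWithRetries runs sendOnce up to maxTries times, giving each try its own span.
func sendWithRetries(ctx context.Context, maxTries int, sendOnce func(context.Context) error) error {
  attempt := func(try int) error {
    // The closure scopes defer span.End() to this attempt only.
    ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
    defer span.End()
    span.SetAttributes(attribute.Int("try", try))
    if err := sendOnce(ctx); err != nil {
      span.RecordError(err)
      return err
    }
    return nil
  }
  var err error
  for try := 0; try < maxTries; try++ {
    if err = attempt(try); err == nil {
      return nil
    }
  }
  return err
}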
+func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {
+  var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
+  for nPending, d := range batch {
+    pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
+    // todo: should we also safeguard against empty metadata here?
+    if d.metadata != nil {
+      pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
+      pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
+      pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
+      nPendingMetadata++
+    }
+
+    if sendExemplars {
+      pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
+    }
+    if sendNativeHistograms {
+      pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
+    }
+
+    // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
+    // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
+    // stop reading from the queue. This makes it safe to reference pendingSamples by index.
+    pendingData[nPending].LabelsRefs = symbolTable.SymbolizeLabels(d.seriesLabels, pendingData[nPending].LabelsRefs)
+    switch d.sType {
+    case tSample:
+      pendingData[nPending].Samples = append(pendingData[nPending].Samples, writev2.Sample{
+        Value:     d.value,
+        Timestamp: d.timestamp,
+      })
+      nPendingSamples++
+    case tExemplar:
+      pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, writev2.Exemplar{
+        LabelsRefs: symbolTable.SymbolizeLabels(d.exemplarLabels, nil), // TODO: optimize, reuse slice
+        Value:      d.value,
+        Timestamp:  d.timestamp,
+      })
+      nPendingExemplars++
+    case tHistogram:
+      pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromIntHistogram(d.timestamp, d.histogram))
+      nPendingHistograms++
+    case tFloatHistogram:
+      pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromFloatHistogram(d.timestamp, d.floatHistogram))
+      nPendingHistograms++
+    case tMetadata:
+      // TODO: log or return an error?
+      // we shouldn't receive metadata type data here, it should already be inserted into the timeSeries
+    }
+  }
+  return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata
+}
+
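[Editor's note] populateV2TimeSeries above leans on writev2's symbol table: every label name, value, help and unit string is replaced by a reference into one shared string table, which is what keeps PRW 2.0 payloads small. A standalone toy version of that interning idea (own minimal type, not the writev2 API; ref 0 is reserved for the empty string, as in the PRW 2.0 payload):

package main

import "fmt"

type symbols struct {
  refs map[string]uint32
  tab  []string
}

func newSymbols() *symbols {
  // Ref 0 is conventionally the empty string.
  return &symbols{refs: map[string]uint32{"": 0}, tab: []string{""}}
}

func (s *symbols) symbolize(str string) uint32 {
  if ref, ok := s.refs[str]; ok {
    return ref
  }
  ref := uint32(len(s.tab))
  s.tab = append(s.tab, str)
  s.refs[str] = ref
  return ref
}

func main() {
  s := newSymbols()
  a := s.symbolize("__name__")
  b := s.symbolize("__name__") // deduplicated: same ref
  fmt.Println(a == b, s.tab)   // true ["" "__name__"]
}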
func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt func(int) error, onRetry func()) error {
  backoff := t.cfg.MinBackoff
  sleepDuration := model.Duration(0)

@@ -1795,7 +2072,21 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
  return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
}

-func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) {
+func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []byte, _ error) {
+  switch enc {
+  case SnappyBlockCompression:
+    compressed = snappy.Encode(*tmpbuf, inp)
+    if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) {
+      // grow the buffer for the next time
+      *tmpbuf = make([]byte, n)
+    }
+    return compressed, nil
+  default:
+    return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc)
+  }
+}
+
+func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
  highest, lowest, timeSeries,
    droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter)

@@ -1821,8 +2112,105 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada
  // snappy uses len() to see if it needs to allocate a new slice. Make the
  // buffer as long as possible.
  if buf != nil {
-    buf = buf[0:cap(buf)]
+    *buf = (*buf)[0:cap(*buf)]
+  } else {
+    buf = &[]byte{}
+  }
+
+  compressed, err = compressPayload(buf, pBuf.Bytes(), enc)
+  if err != nil {
+    return nil, highest, lowest, err
  }
-  compressed := snappy.Encode(buf, pBuf.Bytes())
  return compressed, highest, lowest, nil
}
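[Editor's note] compressPayload above exists so the same scratch buffer can be reused across batches: snappy.Encode only avoids allocating when len(dst) is at least snappy.MaxEncodedLen(len(src)), hence the grow-after-miss step. A small round-trip sketch of that strategy using github.com/golang/snappy (buffer and payload names are illustrative, not from the patch):

package main

import (
  "fmt"

  "github.com/golang/snappy"
)

func compressReusing(scratch *[]byte, src []byte) []byte {
  out := snappy.Encode(*scratch, src)
  if n := snappy.MaxEncodedLen(len(src)); n > len(*scratch) {
    *scratch = make([]byte, n) // grow for the next call
  }
  return out
}

func main() {
  scratch := make([]byte, 0)
  payload := []byte("example remote-write payload")
  compressed := compressReusing(&scratch, payload)
  decoded, err := snappy.Decode(nil, compressed)
  fmt.Println(err == nil, string(decoded) == string(payload)) // true true
}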
+
+func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
+  highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter)
+
+  if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 {
+    level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms)
+  }
+
+  req := &writev2.Request{
+    Symbols:    labels,
+    Timeseries: timeSeries,
+  }
+
+  if pBuf == nil {
+    pBuf = &[]byte{} // For convenience in tests. Not efficient.
+  }
+
+  data, err := req.OptimizedMarshal(*pBuf)
+  if err != nil {
+    return nil, highest, lowest, err
+  }
+  *pBuf = data
+
+  // snappy uses len() to see if it needs to allocate a new slice. Make the
+  // buffer as long as possible.
+  if buf != nil {
+    *buf = (*buf)[0:cap(*buf)]
+  } else {
+    buf = &[]byte{}
+  }
+
+  compressed, err = compressPayload(buf, data, enc)
+  if err != nil {
+    return nil, highest, lowest, err
+  }
+  return compressed, highest, lowest, nil
+}
+
+func buildV2TimeSeries(timeSeries []writev2.TimeSeries, filter func(writev2.TimeSeries) bool) (int64, int64, []writev2.TimeSeries, int, int, int) {
+  var highest int64
+  var lowest int64
+  var droppedSamples, droppedExemplars, droppedHistograms int
+
+  keepIdx := 0
+  lowest = math.MaxInt64
+  for i, ts := range timeSeries {
+    if filter != nil && filter(ts) {
+      if len(ts.Samples) > 0 {
+        droppedSamples++
+      }
+      if len(ts.Exemplars) > 0 {
+        droppedExemplars++
+      }
+      if len(ts.Histograms) > 0 {
+        droppedHistograms++
+      }
+      continue
+    }
+
+    // At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
+    if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
+      highest = ts.Samples[0].Timestamp
+    }
+    if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
+      highest = ts.Exemplars[0].Timestamp
+    }
+    if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
+      highest = ts.Histograms[0].Timestamp
+    }
+
+    // Get the lowest timestamp.
+    if len(ts.Samples) > 0 && ts.Samples[0].Timestamp < lowest {
+      lowest = ts.Samples[0].Timestamp
+    }
+    if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp < lowest {
+      lowest = ts.Exemplars[0].Timestamp
+    }
+    if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
+      lowest = ts.Histograms[0].Timestamp
+    }
+    if i != keepIdx {
+      // We have to swap the kept timeseries with the one which should be dropped.
+      // Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries).
+      timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
+    }
+    keepIdx++
+  }
+
+  timeSeries = timeSeries[:keepIdx]
+  return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
+}
+
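[Editor's note] buildV2TimeSeries above keeps series by swapping them toward the front instead of copying, so the backing array can be reused for the next batch without dropped entries aliasing kept ones. The same idea in isolation, as a generic helper (not part of the patch):

package main

import "fmt"

func filterInPlace[T any](xs []T, drop func(T) bool) []T {
  keepIdx := 0
  for i, x := range xs {
    if drop(x) {
      continue
    }
    if i != keepIdx {
      xs[keepIdx], xs[i] = xs[i], xs[keepIdx] // swap, don't copy
    }
    keepIdx++
  }
  return xs[:keepIdx]
}

func main() {
  xs := []int{1, 2, 3, 4, 5}
  fmt.Println(filterInPlace(xs, func(v int) bool { return v%2 == 0 })) // [1 3 5]
}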
[File diff suppressed because it is too large]
@@ -124,7 +124,7 @@ func TestSampledReadEndpoint(t *testing.T) {
        {Name: "d", Value: "e"},
      },
      Histograms: []prompb.Histogram{
-        FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(0)),
+        prompb.FromFloatHistogram(0, tsdbutil.GenerateTestFloatHistogram(0)),
      },
    },
  },
},

@@ -92,7 +92,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {

  for _, tc := range cases {
    t.Run("", func(t *testing.T) {
-      s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+      s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
      conf := &config.Config{
        GlobalConfig:      config.DefaultGlobalConfig,
        RemoteReadConfigs: tc.cfgs,
@@ -172,12 +172,12 @@ func TestSeriesSetFilter(t *testing.T) {
      toRemove: []string{"foo"},
      in: &prompb.QueryResult{
        Timeseries: []*prompb.TimeSeries{
-          {Labels: LabelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)},
+          {Labels: prompb.FromLabels(labels.FromStrings("foo", "bar", "a", "b"), nil)},
        },
      },
      expected: &prompb.QueryResult{
        Timeseries: []*prompb.TimeSeries{
-          {Labels: LabelsToLabelsProto(labels.FromStrings("a", "b"), nil)},
+          {Labels: prompb.FromLabels(labels.FromStrings("a", "b"), nil)},
        },
      },
    },
  },
@@ -211,7 +211,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom

  q := &prompb.QueryResult{}
  for _, s := range c.store {
-    l := LabelProtosToLabels(&c.b, s.Labels)
+    l := s.ToLabels(&c.b, nil)
    var notMatch bool

    for _, m := range matchers {
@@ -62,7 +62,7 @@ type Storage struct {
}

// NewStorage returns a remote.Storage.
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
+func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage {
  if l == nil {
    l = log.NewNopLogger()
  }
@@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
    logger:                 logger,
    localStartTimeCallback: stCallback,
  }
-  s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
+  s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL)
  return s
}

@@ -29,7 +29,7 @@ import (
func TestStorageLifecycle(t *testing.T) {
  dir := t.TempDir()

-  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
  conf := &config.Config{
    GlobalConfig: config.DefaultGlobalConfig,
    RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -56,7 +56,7 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
  dir := t.TempDir()

-  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

  conf := &config.Config{
    GlobalConfig: config.GlobalConfig{},
@@ -77,7 +77,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
  dir := t.TempDir()

-  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

  conf := &config.Config{
    GlobalConfig: config.GlobalConfig{
@@ -102,7 +102,7 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
  dir := t.TempDir()

-  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+  s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

  conf := &config.Config{
    GlobalConfig: config.GlobalConfig{
@@ -154,7 +154,7 @@ func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
// ApplyConfig runs concurrently with Notify
// See https://github.com/prometheus/prometheus/issues/12747
func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) {
-  s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil)
+  s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil, false)

  var wg sync.WaitGroup
  wg.Add(2000)
@@ -15,6 +15,7 @@ package remote

import (
  "context"
+  "errors"
  "fmt"
  "math"
  "sync"
@@ -65,6 +66,7 @@ type WriteStorage struct {
  externalLabels labels.Labels
  dir            string
  queues         map[string]*QueueManager
+  metadataInWAL  bool
  samplesIn      *ewmaRate
  flushDeadline  time.Duration
  interner       *pool
@@ -76,7 +78,7 @@ type WriteStorage struct {
}

// NewWriteStorage creates and runs a WriteStorage.
-func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
+func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage {
  if logger == nil {
    logger = log.NewNopLogger()
  }
@@ -92,6 +94,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
    interner:      newPool(),
    scraper:       sm,
    quit:          make(chan struct{}),
+    metadataInWAL: metadataInWal,
    highestTimestamp: &maxTimestamp{
      Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: namespace,
@@ -145,6 +148,9 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
  newQueues := make(map[string]*QueueManager)
  newHashes := []string{}
  for _, rwConf := range conf.RemoteWriteConfigs {
+    if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL {
+      return errors.New("invalid remote write configuration, if you are using remote write version 2.0 the `--enable-feature=metadata-wal-records` feature flag must be enabled")
+    }
    hash, err := toHash(rwConf)
    if err != nil {
      return err
@@ -165,6 +171,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {

    c, err := NewWriteClient(name, &ClientConfig{
      URL:              rwConf.URL,
+      WriteProtoMsg:    rwConf.ProtobufMessage,
      Timeout:          rwConf.RemoteTimeout,
      HTTPClientConfig: rwConf.HTTPClientConfig,
      SigV4Config:      rwConf.SigV4Config,
@@ -207,6 +214,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
      rws.scraper,
      rwConf.SendExemplars,
      rwConf.SendNativeHistograms,
+      rwConf.ProtobufMessage,
    )
    // Keep track of which queues are new so we know which to start.
    newHashes = append(newHashes, hash)
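[Editor's note] The new ApplyConfig guard above ties PRW 2.0 queues to the metadata-wal-records feature flag. A standalone restatement of just that check, with local stand-in types rather than the config package's (the message identifiers mirror the proto parameter strings used elsewhere in this patch):

package main

import (
  "errors"
  "fmt"
)

type protoMsg string

const (
  protoMsgV1 protoMsg = "prometheus.WriteRequest"
  protoMsgV2 protoMsg = "io.prometheus.write.v2.Request"
)

// validateQueue rejects v2 queues unless metadata is being written to the WAL.
func validateQueue(msg protoMsg, metadataInWAL bool) error {
  if msg == protoMsgV2 && !metadataInWAL {
    return errors.New("remote write v2 requires --enable-feature=metadata-wal-records")
  }
  return nil
}

func main() {
  fmt.Println(validateQueue(protoMsgV2, false)) // error
  fmt.Println(validateQueue(protoMsgV1, false)) // <nil>
}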
@@ -17,19 +17,24 @@ import (
  "context"
  "errors"
  "fmt"
+  "io"
  "net/http"
+  "strings"
  "time"

  "github.com/go-kit/log"
  "github.com/go-kit/log/level"
+  "github.com/gogo/protobuf/proto"
+  "github.com/golang/snappy"
  "github.com/prometheus/client_golang/prometheus"

+  "github.com/prometheus/prometheus/config"
  "github.com/prometheus/prometheus/model/exemplar"
  "github.com/prometheus/prometheus/model/histogram"
  "github.com/prometheus/prometheus/model/labels"
  "github.com/prometheus/prometheus/model/timestamp"
  "github.com/prometheus/prometheus/prompb"
+  writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
  "github.com/prometheus/prometheus/storage"
  otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
@@ -39,17 +44,23 @@ type writeHandler struct {
  appendable storage.Appendable

  samplesWithInvalidLabelsTotal prometheus.Counter

+  acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
}

const maxAheadTime = 10 * time.Minute

-// NewWriteHandler creates a http.Handler that accepts remote write requests and
-// writes them to the provided appendable.
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
+// NewWriteHandler creates a http.Handler that accepts remote write requests with
+// the given message in acceptedProtoMsgs and writes them to the provided appendable.
+func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
+  protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
+  for _, acc := range acceptedProtoMsgs {
+    protoMsgs[acc] = struct{}{}
+  }
  h := &writeHandler{
    logger:     logger,
    appendable: appendable,
+    acceptedProtoMsgs: protoMsgs,
    samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
      Namespace: "prometheus",
      Subsystem: "api",
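[Editor's note] For context on what NewWriteHandler's acceptedProtoMsgs gates: a 2.0 sender is expected to announce the message via the Content-Type proto parameter. A hedged client-side sketch (the URL is a placeholder; header names and values follow the constants referenced in the tests later in this patch, to the best of my reading):

package sketch

import (
  "bytes"
  "net/http"
)

// newV2Request builds a PRW 2.0 request around an already snappy-compressed body.
func newV2Request(compressedBody []byte) (*http.Request, error) {
  req, err := http.NewRequest(http.MethodPost, "https://example.com/api/v1/write", bytes.NewReader(compressedBody))
  if err != nil {
    return nil, err
  }
  // Media type plus the proto parameter selects the v2 message.
  req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
  req.Header.Set("Content-Encoding", "snappy")
  req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")
  return req, nil
}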
@@ -63,15 +74,107 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
  return h
}

+func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
+  contentType = strings.TrimSpace(contentType)
+
+  parts := strings.Split(contentType, ";")
+  if parts[0] != appProtoContentType {
+    return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
+  }
+  // Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
+  for _, p := range parts[1:] {
+    pair := strings.Split(p, "=")
+    if len(pair) != 2 {
+      return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
+    }
+    if pair[0] == "proto" {
+      ret := config.RemoteWriteProtoMsg(pair[1])
+      if err := ret.Validate(); err != nil {
+        return "", fmt.Errorf("got %v content type; %w", contentType, err)
+      }
+      return ret, nil
+    }
+  }
+  // No "proto=" parameter, assuming v1.
+  return config.RemoteWriteProtoMsgV1, nil
+}
+
func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-  req, err := DecodeWriteRequest(r.Body)
+  contentType := r.Header.Get("Content-Type")
+  if contentType == "" {
+    // Don't break yolo 1.0 clients if not needed. This is similar to what we did
+    // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
+    // We could give http.StatusUnsupportedMediaType, but let's assume 1.0 message by default.
+    contentType = appProtoContentType
+  }
+
+  msg, err := h.parseProtoMsg(contentType)
+  if err != nil {
+    level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+    http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+    return
+  }
+
+  if _, ok := h.acceptedProtoMsgs[msg]; !ok {
+    err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) {
+      for k := range h.acceptedProtoMsgs {
+        ret = append(ret, string(k))
+      }
+      return ret
+    }())
+    level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+    http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+  }
+
+  enc := r.Header.Get("Content-Encoding")
+  if enc == "" {
+    // Don't break yolo 1.0 clients if not needed. This is similar to what we did
+    // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
+    // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default.
+  } else if enc != string(SnappyBlockCompression) {
+    err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression)
+    level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+    http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+  }
+
+  // Read the request body.
+  body, err := io.ReadAll(r.Body)
  if err != nil {
    level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
    http.Error(w, err.Error(), http.StatusBadRequest)
    return
  }

-  err = h.write(r.Context(), req)
+  decompressed, err := snappy.Decode(nil, body)
+  if err != nil {
+    // TODO(bwplotka): Add more context to responded error?
+    level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error())
+    http.Error(w, err.Error(), http.StatusBadRequest)
+    return
+  }
+
+  // Now we have a decompressed buffer we can unmarshal it.
+  switch msg {
+  case config.RemoteWriteProtoMsgV1:
+    var req prompb.WriteRequest
+    if err := proto.Unmarshal(decompressed, &req); err != nil {
+      // TODO(bwplotka): Add more context to responded error?
+      level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error())
+      http.Error(w, err.Error(), http.StatusBadRequest)
+      return
+    }
+    err = h.write(r.Context(), &req)
+  case config.RemoteWriteProtoMsgV2:
+    var req writev2.Request
+    if err := proto.Unmarshal(decompressed, &req); err != nil {
+      // TODO(bwplotka): Add more context to responded error?
+      level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
+      http.Error(w, err.Error(), http.StatusBadRequest)
+      return
+    }
+    err = h.writeV2(r.Context(), &req)
+  }
+
  switch {
  case err == nil:
  case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
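[Editor's note] The ServeHTTP body above boils down to snappy block-decode, then unmarshal into whichever message the content type selected. The v1 branch in isolation, as a sketch using the same gogo proto and snappy packages this file imports:

package sketch

import (
  "fmt"

  "github.com/gogo/protobuf/proto"
  "github.com/golang/snappy"

  "github.com/prometheus/prometheus/prompb"
)

// decodeV1 turns a raw request body into a v1 WriteRequest.
func decodeV1(body []byte) (*prompb.WriteRequest, error) {
  decompressed, err := snappy.Decode(nil, body)
  if err != nil {
    return nil, fmt.Errorf("decompress: %w", err)
  }
  var req prompb.WriteRequest
  if err := proto.Unmarshal(decompressed, &req); err != nil {
    return nil, fmt.Errorf("unmarshal: %w", err)
  }
  return &req, nil
}

A v2 decoder would be identical except for unmarshaling into writev2.Request and then resolving label references through req.Symbols.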
@@ -123,62 +226,27 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
  }()

  b := labels.NewScratchBuilder(0)
-  var exemplarErr error

  for _, ts := range req.Timeseries {
-    labels := LabelProtosToLabels(&b, ts.Labels)
-    if !labels.IsValid() {
-      level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
+    ls := ts.ToLabels(&b, nil)
+    if !ls.IsValid() {
+      level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
      samplesWithInvalidLabels++
      continue
    }
-    var ref storage.SeriesRef
-    for _, s := range ts.Samples {
-      ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value)
-      if err != nil {
-        unwrappedErr := errors.Unwrap(err)
-        if unwrappedErr == nil {
-          unwrappedErr = err
-        }
-        if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
-          level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
-        }
-        return err
-      }
+    err := h.appendSamples(timeLimitApp, ts.Samples, ls)
+    if err != nil {
+      return err
    }

    for _, ep := range ts.Exemplars {
-      e := exemplarProtoToExemplar(&b, ep)
-      _, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e)
-      exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
-      if exemplarErr != nil {
-        // Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
-        level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
-      }
+      e := ep.ToExemplar(&b, nil)
+      h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
    }

-    for _, hp := range ts.Histograms {
-      if hp.IsFloatHistogram() {
-        fhs := FloatHistogramProtoToFloatHistogram(hp)
-        _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
-      } else {
-        hs := HistogramProtoToHistogram(hp)
-        _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
-      }
-      if err != nil {
-        unwrappedErr := errors.Unwrap(err)
-        if unwrappedErr == nil {
-          unwrappedErr = err
-        }
-        // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
-        // a note indicating its inclusion in the future.
-        if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
-          level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
-        }
-        return err
-      }
+    err = h.appendHistograms(timeLimitApp, ts.Histograms, ls)
+    if err != nil {
+      return err
    }
  }

@@ -192,6 +260,149 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
  return nil
}

+func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) {
+  outOfOrderExemplarErrs := 0
+
+  timeLimitApp := &timeLimitAppender{
+    Appender: h.appendable.Appender(ctx),
+    maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
+  }
+
+  defer func() {
+    if err != nil {
+      _ = timeLimitApp.Rollback()
+      return
+    }
+    err = timeLimitApp.Commit()
+  }()
+
+  b := labels.NewScratchBuilder(0)
+  for _, ts := range req.Timeseries {
+    ls := ts.ToLabels(&b, req.Symbols)
+
+    err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls)
+    if err != nil {
+      return err
+    }
+
+    for _, ep := range ts.Exemplars {
+      e := ep.ToExemplar(&b, req.Symbols)
+      h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
+    }
+
+    err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls)
+    if err != nil {
+      return err
+    }
+
+    m := ts.ToMetadata(req.Symbols)
+    if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil {
+      level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
+    }
+  }
+
+  if outOfOrderExemplarErrs > 0 {
+    _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+  }
+
+  return nil
+}
+
+func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
+  _, err := app.AppendExemplar(0, labels, e)
+  err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
+  if err != nil {
+    // Since exemplar storage is still experimental, we don't fail the request on ingestion errors
+    level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
+  }
+}
+
+func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
+  var ref storage.SeriesRef
+  var err error
+  for _, s := range ss {
+    ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+    if err != nil {
+      unwrappedErr := errors.Unwrap(err)
+      if unwrappedErr == nil {
+        unwrappedErr = err
+      }
+      if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+        level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+      }
+      return err
+    }
+  }
+  return nil
+}
+
+func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
+  var ref storage.SeriesRef
+  var err error
+  for _, s := range ss {
+    ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+    if err != nil {
+      unwrappedErr := errors.Unwrap(err)
+      if unwrappedErr == nil {
+        unwrappedErr = err
+      }
+      if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+        level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+      }
+      return err
+    }
+  }
+  return nil
+}
+
+func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
+  var err error
+  for _, hp := range hh {
+    if hp.IsFloatHistogram() {
+      _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
+    } else {
+      _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
+    }
+    if err != nil {
+      unwrappedErr := errors.Unwrap(err)
+      if unwrappedErr == nil {
+        unwrappedErr = err
+      }
+      // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+      // a note indicating its inclusion in the future.
+      if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+        level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+      }
+      return err
+    }
+  }
+  return nil
+}
+
+func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
+  var err error
+  for _, hp := range hh {
+    if hp.IsFloatHistogram() {
+      _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
+    } else {
+      _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
+    }
+    if err != nil {
+      unwrappedErr := errors.Unwrap(err)
+      if unwrappedErr == nil {
+        unwrappedErr = err
+      }
+      // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+      // a note indicating its inclusion in the future.
+      if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+        level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+      }
+      return err
+    }
+  }
+  return nil
+}
+
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {

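[Editor's note] The append helpers above repeat one idiom: unwrap the appender error, fall back to the original when it wraps nothing, then run errors.Is against the sentinel storage errors. A minimal standalone version of that pattern with a local sentinel error:

package main

import (
  "errors"
  "fmt"
)

var errOutOfOrder = errors.New("out of order sample")

// classify reports whether err is (or wraps) the out-of-order sentinel.
func classify(err error) bool {
  unwrapped := errors.Unwrap(err)
  if unwrapped == nil {
    unwrapped = err
  }
  return errors.Is(unwrapped, errOutOfOrder)
}

func main() {
  fmt.Println(classify(errOutOfOrder))                        // true
  fmt.Println(classify(fmt.Errorf("add: %w", errOutOfOrder))) // true
}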
@@ -30,25 +30,230 @@ import (
  "github.com/google/go-cmp/cmp"
  "github.com/stretchr/testify/require"

+  "github.com/prometheus/prometheus/config"
  "github.com/prometheus/prometheus/model/exemplar"
  "github.com/prometheus/prometheus/model/histogram"
  "github.com/prometheus/prometheus/model/labels"
  "github.com/prometheus/prometheus/model/metadata"
  "github.com/prometheus/prometheus/prompb"
+  writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
  "github.com/prometheus/prometheus/storage"
  "github.com/prometheus/prometheus/tsdb"
  "github.com/prometheus/prometheus/util/testutil"
)

-func TestRemoteWriteHandler(t *testing.T) {
+func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) {
-  buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+  payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
  require.NoError(t, err)

-  req, err := http.NewRequest("", "", bytes.NewReader(buf))
+  for _, tc := range []struct {
+    name         string
+    reqHeaders   map[string]string
+    expectedCode int
+  }{
+    // Generally Prometheus 1.0 Receiver never checked for existence of the headers, so
+    // we keep things permissive.
+    {
+      name: "correct PRW 1.0 headers",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name: "missing remote write version",
+      reqHeaders: map[string]string{
+        "Content-Type":     remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+        "Content-Encoding": string(SnappyBlockCompression),
+      },
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name:         "no headers",
+      reqHeaders:   map[string]string{},
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name: "missing content-type",
+      reqHeaders: map[string]string{
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name: "missing content-encoding",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name: "wrong content-type",
+      reqHeaders: map[string]string{
+        "Content-Type":           "yolo",
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "wrong content-type2",
+      reqHeaders: map[string]string{
+        "Content-Type":           appProtoContentType + ";proto=yolo",
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "not supported content-encoding",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+        "Content-Encoding":       "zstd",
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+  } {
+    t.Run(tc.name, func(t *testing.T) {
+      req, err := http.NewRequest("", "", bytes.NewReader(payload))
+      require.NoError(t, err)
+      for k, v := range tc.reqHeaders {
+        req.Header.Set(k, v)
+      }
+
+      appendable := &mockAppendable{}
+      handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+      recorder := httptest.NewRecorder()
+      handler.ServeHTTP(recorder, req)
+
+      resp := recorder.Result()
+      out, err := io.ReadAll(resp.Body)
+      require.NoError(t, err)
+      _ = resp.Body.Close()
+      require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
+    })
+  }
+}
+
+func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
+  payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
  require.NoError(t, err)

+  for _, tc := range []struct {
+    name         string
+    reqHeaders   map[string]string
+    expectedCode int
+  }{
+    {
+      name: "correct PRW 2.0 headers",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusNoContent,
+    },
+    {
+      name: "missing remote write version",
+      reqHeaders: map[string]string{
+        "Content-Type":     remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+        "Content-Encoding": string(SnappyBlockCompression),
+      },
+      expectedCode: http.StatusNoContent, // We don't check for now.
+    },
+    {
+      name:         "no headers",
+      reqHeaders:   map[string]string{},
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "missing content-type",
+      reqHeaders: map[string]string{
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      // This only gives 415, because we explicitly only support 2.0. If we supported both
+      // (default) it would be empty message parsed and ok response.
+      // This is perhaps better, than 415 for previously working 1.0 flow with
+      // no content-type.
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "missing content-encoding",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusNoContent, // Similar to 1.0 impl, we default to Snappy, so it works.
+    },
+    {
+      name: "wrong content-type",
+      reqHeaders: map[string]string{
+        "Content-Type":           "yolo",
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "wrong content-type2",
+      reqHeaders: map[string]string{
+        "Content-Type":           appProtoContentType + ";proto=yolo",
+        "Content-Encoding":       string(SnappyBlockCompression),
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+    {
+      name: "not supported content-encoding",
+      reqHeaders: map[string]string{
+        "Content-Type":           remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+        "Content-Encoding":       "zstd",
+        RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+      },
+      expectedCode: http.StatusUnsupportedMediaType,
+    },
+  } {
+    t.Run(tc.name, func(t *testing.T) {
+      req, err := http.NewRequest("", "", bytes.NewReader(payload))
+      require.NoError(t, err)
+      for k, v := range tc.reqHeaders {
+        req.Header.Set(k, v)
+      }
+
+      appendable := &mockAppendable{}
+      handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+      recorder := httptest.NewRecorder()
+      handler.ServeHTTP(recorder, req)
+
+      resp := recorder.Result()
+      out, err := io.ReadAll(resp.Body)
+      require.NoError(t, err)
+      _ = resp.Body.Close()
+      require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
+    })
+  }
+}
+
func TestRemoteWriteHandler_V1Message(t *testing.T) {
|
||||||
|
payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// NOTE: Strictly speaking, even for 1.0 we require headers, but we never verified those
|
||||||
|
// in Prometheus, so keeping like this to not break existing 1.0 clients.
|
||||||
|
|
||||||
appendable := &mockAppendable{}
|
appendable := &mockAppendable{}
|
||||||
handler := NewWriteHandler(nil, nil, appendable)
|
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||||
|
|
||||||
recorder := httptest.NewRecorder()
|
recorder := httptest.NewRecorder()
|
||||||
handler.ServeHTTP(recorder, req)
|
handler.ServeHTTP(recorder, req)
|
||||||
|
@ -61,24 +266,22 @@ func TestRemoteWriteHandler(t *testing.T) {
|
||||||
j := 0
|
j := 0
|
||||||
k := 0
|
k := 0
|
||||||
for _, ts := range writeRequestFixture.Timeseries {
|
for _, ts := range writeRequestFixture.Timeseries {
|
||||||
labels := LabelProtosToLabels(&b, ts.Labels)
|
labels := ts.ToLabels(&b, nil)
|
||||||
for _, s := range ts.Samples {
|
for _, s := range ts.Samples {
|
||||||
requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
|
requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, e := range ts.Exemplars {
|
for _, e := range ts.Exemplars {
|
||||||
exemplarLabels := LabelProtosToLabels(&b, e.Labels)
|
exemplarLabels := e.ToExemplar(&b, nil).Labels
|
||||||
requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
|
requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
|
||||||
j++
|
j++
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, hp := range ts.Histograms {
|
for _, hp := range ts.Histograms {
|
||||||
if hp.IsFloatHistogram() {
|
if hp.IsFloatHistogram() {
|
||||||
fh := FloatHistogramProtoToFloatHistogram(hp)
|
fh := hp.ToFloatHistogram()
|
||||||
requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
|
requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
|
||||||
} else {
|
} else {
|
||||||
h := HistogramProtoToHistogram(hp)
|
h := hp.ToIntHistogram()
|
||||||
requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
|
requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -87,8 +290,66 @@ func TestRemoteWriteHandler(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestOutOfOrderSample(t *testing.T) {
|
func TestRemoteWriteHandler_V2Message(t *testing.T) {
|
||||||
tests := []struct {
|
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||||
|
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||||
|
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||||
|
|
||||||
|
appendable := &mockAppendable{}
|
||||||
|
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||||
|
|
||||||
|
recorder := httptest.NewRecorder()
|
||||||
|
handler.ServeHTTP(recorder, req)
|
||||||
|
|
||||||
|
resp := recorder.Result()
|
||||||
|
require.Equal(t, http.StatusNoContent, resp.StatusCode)
|
||||||
|
|
||||||
|
b := labels.NewScratchBuilder(0)
|
||||||
|
i := 0
|
||||||
|
j := 0
|
||||||
|
k := 0
|
||||||
|
for _, ts := range writeV2RequestFixture.Timeseries {
|
||||||
|
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
|
||||||
|
|
||||||
|
for _, s := range ts.Samples {
|
||||||
|
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
|
||||||
|
|
||||||
|
switch i {
|
||||||
|
case 0:
|
||||||
|
requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i])
|
||||||
|
case 1:
|
||||||
|
requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i])
|
||||||
|
default:
|
||||||
|
t.Fatal("more series/samples then expected")
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
for _, e := range ts.Exemplars {
|
||||||
|
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
|
||||||
|
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
for _, hp := range ts.Histograms {
|
||||||
|
if hp.IsFloatHistogram() {
|
||||||
|
fh := hp.ToFloatHistogram()
|
||||||
|
requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
|
||||||
|
} else {
|
||||||
|
h := hp.ToIntHistogram()
|
||||||
|
requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
|
||||||
|
}
|
||||||
|
k++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
+func TestOutOfOrderSample_V1Message(t *testing.T) {
+    for _, tc := range []struct {
         Name      string
         Timestamp int64
     }{
@@ -100,23 +361,59 @@ func TestOutOfOrderSample(t *testing.T) {
             Name:      "future",
             Timestamp: math.MaxInt64,
         },
-    }
-
-    for _, tc := range tests {
+    } {
         t.Run(tc.Name, func(t *testing.T) {
-            buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+            payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
                 Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
                 Samples: []prompb.Sample{{Value: 1, Timestamp: tc.Timestamp}},
-            }}, nil, nil, nil, nil)
+            }}, nil, nil, nil, nil, "snappy")
             require.NoError(t, err)

-            req, err := http.NewRequest("", "", bytes.NewReader(buf))
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
             require.NoError(t, err)

-            appendable := &mockAppendable{
-                latestSample: 100,
-            }
-            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+            appendable := &mockAppendable{latestSample: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+            recorder := httptest.NewRecorder()
+            handler.ServeHTTP(recorder, req)
+
+            resp := recorder.Result()
+            require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+        })
+    }
+}
+
+func TestOutOfOrderSample_V2Message(t *testing.T) {
+    for _, tc := range []struct {
+        Name      string
+        Timestamp int64
+    }{
+        {
+            Name:      "historic",
+            Timestamp: 0,
+        },
+        {
+            Name:      "future",
+            Timestamp: math.MaxInt64,
+        },
+    } {
+        t.Run(tc.Name, func(t *testing.T) {
+            payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+                LabelsRefs: []uint32{1, 2},
+                Samples:    []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
+            }}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
+            require.NoError(t, err)
+
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
+            require.NoError(t, err)
+
+            req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+            req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+            req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+            appendable := &mockAppendable{latestSample: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})

             recorder := httptest.NewRecorder()
             handler.ServeHTTP(recorder, req)

@@ -128,9 +425,9 @@ func TestOutOfOrderSample(t *testing.T) {
     }

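The out-of-order tests above and below expect HTTP 400, while the commit-error tests further down expect HTTP 500. For orientation, here is a minimal, standalone sketch of that status mapping; the sentinel errors are local stand-ins rather than the ones exported by the Prometheus storage package, and this is not the actual write handler code.

package main

import (
    "errors"
    "fmt"
    "net/http"
)

var (
    errOutOfOrderSample = errors.New("out of order sample") // stand-in sentinel
    errOutOfBounds      = errors.New("out of bounds")       // stand-in sentinel
)

// statusForAppendError decides which HTTP status a remote-write receiver
// should return for a given append/commit error: client-side data problems
// map to 400 (resending will not help), everything else to 500 (retryable).
func statusForAppendError(err error) int {
    switch {
    case err == nil:
        return http.StatusNoContent // 204: everything ingested
    case errors.Is(err, errOutOfOrderSample), errors.Is(err, errOutOfBounds):
        return http.StatusBadRequest // 400: bad or too-old/too-new data
    default:
        return http.StatusInternalServerError // 500: server-side failure, e.g. commit error
    }
}

func main() {
    fmt.Println(statusForAppendError(nil))                        // 204
    fmt.Println(statusForAppendError(errOutOfOrderSample))        // 400
    fmt.Println(statusForAppendError(errors.New("commit error"))) // 500
}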
 // This test case currently aims to verify that the WriteHandler endpoint
-// don't fail on ingestion errors since the exemplar storage is
+// don't fail on exemplar ingestion errors since the exemplar storage is
 // still experimental.
-func TestOutOfOrderExemplar(t *testing.T) {
+func TestOutOfOrderExemplar_V1Message(t *testing.T) {
     tests := []struct {
         Name      string
         Timestamp int64
@@ -147,19 +444,17 @@ func TestOutOfOrderExemplar(t *testing.T) {
     for _, tc := range tests {
         t.Run(tc.Name, func(t *testing.T) {
-            buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+            payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
                 Labels:    []prompb.Label{{Name: "__name__", Value: "test_metric"}},
                 Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: tc.Timestamp}},
-            }}, nil, nil, nil, nil)
+            }}, nil, nil, nil, nil, "snappy")
             require.NoError(t, err)

-            req, err := http.NewRequest("", "", bytes.NewReader(buf))
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
             require.NoError(t, err)

-            appendable := &mockAppendable{
-                latestExemplar: 100,
-            }
-            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+            appendable := &mockAppendable{latestExemplar: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

             recorder := httptest.NewRecorder()
             handler.ServeHTTP(recorder, req)

@@ -171,7 +466,7 @@ func TestOutOfOrderExemplar(t *testing.T) {
     }
 }

-func TestOutOfOrderHistogram(t *testing.T) {
+func TestOutOfOrderExemplar_V2Message(t *testing.T) {
     tests := []struct {
         Name      string
         Timestamp int64
@@ -188,19 +483,58 @@ func TestOutOfOrderHistogram(t *testing.T) {
     for _, tc := range tests {
         t.Run(tc.Name, func(t *testing.T) {
-            buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+            payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+                LabelsRefs: []uint32{1, 2},
+                Exemplars:  []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
+            }}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
+            require.NoError(t, err)
+
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
+            require.NoError(t, err)
+
+            req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+            req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+            req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+            appendable := &mockAppendable{latestExemplar: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+            recorder := httptest.NewRecorder()
+            handler.ServeHTTP(recorder, req)
+
+            resp := recorder.Result()
+            // TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
+            require.Equal(t, http.StatusNoContent, resp.StatusCode)
+        })
+    }
+}

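As the TODO in the V2 exemplar test notes, exemplar storage is still experimental, so exemplar append failures are tolerated instead of turning an otherwise valid write into an error response. A standalone sketch of that best-effort behaviour, with locally defined stand-in types (not the real storage.Appender API):

package main

import (
    "errors"
    "fmt"
)

type exemplar struct {
    Value     float64
    Timestamp int64
}

type exemplarAppender interface {
    AppendExemplar(e exemplar) error
}

// rejectOld refuses exemplars older than its latest timestamp, like the
// mock appendable with latestExemplar: 100 in the tests above.
type rejectOld struct{ latest int64 }

func (r rejectOld) AppendExemplar(e exemplar) error {
    if e.Timestamp < r.latest {
        return errors.New("out of order exemplar")
    }
    return nil
}

// appendExemplarsBestEffort counts and drops failed exemplars instead of
// failing the whole request, which is why the tests still expect 204.
func appendExemplarsBestEffort(app exemplarAppender, exemplars []exemplar) (dropped int) {
    for _, e := range exemplars {
        if err := app.AppendExemplar(e); err != nil {
            dropped++
        }
    }
    return dropped
}

func main() {
    dropped := appendExemplarsBestEffort(rejectOld{latest: 100}, []exemplar{{1, 0}, {1, 200}})
    fmt.Println("dropped:", dropped) // 1
}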
+func TestOutOfOrderHistogram_V1Message(t *testing.T) {
+    for _, tc := range []struct {
+        Name      string
+        Timestamp int64
+    }{
+        {
+            Name:      "historic",
+            Timestamp: 0,
+        },
+        {
+            Name:      "future",
+            Timestamp: math.MaxInt64,
+        },
+    } {
+        t.Run(tc.Name, func(t *testing.T) {
+            payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
                 Labels:     []prompb.Label{{Name: "__name__", Value: "test_metric"}},
-                Histograms: []prompb.Histogram{HistogramToHistogramProto(tc.Timestamp, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
-            }}, nil, nil, nil, nil)
+                Histograms: []prompb.Histogram{prompb.FromIntHistogram(tc.Timestamp, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
+            }}, nil, nil, nil, nil, "snappy")
             require.NoError(t, err)

-            req, err := http.NewRequest("", "", bytes.NewReader(buf))
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
             require.NoError(t, err)

-            appendable := &mockAppendable{
-                latestHistogram: 100,
-            }
-            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+            appendable := &mockAppendable{latestHistogram: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

             recorder := httptest.NewRecorder()
             handler.ServeHTTP(recorder, req)

@@ -211,9 +545,49 @@ func TestOutOfOrderHistogram(t *testing.T) {
     }
 }

-func BenchmarkRemoteWritehandler(b *testing.B) {
+func TestOutOfOrderHistogram_V2Message(t *testing.T) {
+    for _, tc := range []struct {
+        Name      string
+        Timestamp int64
+    }{
+        {
+            Name:      "historic",
+            Timestamp: 0,
+        },
+        {
+            Name:      "future",
+            Timestamp: math.MaxInt64,
+        },
+    } {
+        t.Run(tc.Name, func(t *testing.T) {
+            payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+                LabelsRefs: []uint32{0, 1},
+                Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
+            }}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy")
+            require.NoError(t, err)
+
+            req, err := http.NewRequest("", "", bytes.NewReader(payload))
+            require.NoError(t, err)
+
+            req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+            req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+            req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+            appendable := &mockAppendable{latestHistogram: 100}
+            handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+            recorder := httptest.NewRecorder()
+            handler.ServeHTTP(recorder, req)
+
+            resp := recorder.Result()
+            require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+        })
+    }
+}

+func BenchmarkRemoteWriteHandler(b *testing.B) {
     const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
-    reqs := []*http.Request{}
+    var reqs []*http.Request
     for i := 0; i < b.N; i++ {
         num := strings.Repeat(strconv.Itoa(i), 16)
         buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
@@ -221,8 +595,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
                 {Name: "__name__", Value: "test_metric"},
                 {Name: "test_label_name_" + num, Value: labelValue + num},
             },
-            Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
-        }}, nil, nil, nil, nil)
+            Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram)},
+        }}, nil, nil, nil, nil, "snappy")
         require.NoError(b, err)
         req, err := http.NewRequest("", "", bytes.NewReader(buf))
         require.NoError(b, err)
@@ -230,7 +604,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
     }

     appendable := &mockAppendable{}
-    handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+    // TODO: test with other proto format(s)
+    handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
     recorder := httptest.NewRecorder()

     b.ResetTimer()
@@ -239,17 +614,39 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
     }
 }

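The benchmark above deliberately keeps request construction (proto marshalling, snappy compression) out of the measured section and only starts timing after b.ResetTimer. A compact, standalone sketch of the same pattern, using a placeholder handler rather than the real remote-write handler (place it in a _test.go file to run it with go test -bench):

package remotebench

import (
    "bytes"
    "net/http"
    "net/http/httptest"
    "testing"
)

func BenchmarkHandlerSketch(b *testing.B) {
    // Placeholder handler; the real benchmark drives the remote-write handler.
    handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusNoContent)
    })

    // Build all requests before the timed loop so only ServeHTTP is measured.
    reqs := make([]*http.Request, 0, b.N)
    for i := 0; i < b.N; i++ {
        req, err := http.NewRequest(http.MethodPost, "/api/v1/write", bytes.NewReader([]byte("payload")))
        if err != nil {
            b.Fatal(err)
        }
        reqs = append(reqs, req)
    }
    recorder := httptest.NewRecorder()

    b.ResetTimer()
    for _, req := range reqs {
        handler.ServeHTTP(recorder, req)
    }
}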
-func TestCommitErr(t *testing.T) {
-    buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+func TestCommitErr_V1Message(t *testing.T) {
+    payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
     require.NoError(t, err)

-    req, err := http.NewRequest("", "", bytes.NewReader(buf))
+    req, err := http.NewRequest("", "", bytes.NewReader(payload))
     require.NoError(t, err)

-    appendable := &mockAppendable{
-        commitErr: fmt.Errorf("commit error"),
-    }
-    handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+    appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+    handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+    recorder := httptest.NewRecorder()
+    handler.ServeHTTP(recorder, req)
+
+    resp := recorder.Result()
+    body, err := io.ReadAll(resp.Body)
+    require.NoError(t, err)
+    require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
+    require.Equal(t, "commit error\n", string(body))
+}
+
+func TestCommitErr_V2Message(t *testing.T) {
+    payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+    require.NoError(t, err)
+
+    req, err := http.NewRequest("", "", bytes.NewReader(payload))
+    require.NoError(t, err)
+
+    req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+    req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+    req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+    appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+    handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})

     recorder := httptest.NewRecorder()
     handler.ServeHTTP(recorder, req)
@@ -275,10 +672,10 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
     b.Cleanup(func() {
         require.NoError(b, db.Close())
     })
+    // TODO: test with other proto format(s)
+    handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

-    handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head())
-
-    buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil)
+    buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
     require.NoError(b, err)

     req, err := http.NewRequest("", "", bytes.NewReader(buf))
@@ -291,7 +688,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {

     var bufRequests [][]byte
     for i := 0; i < 100; i++ {
-        buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil)
+        buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
         require.NoError(b, err)
         bufRequests = append(bufRequests, buf)
     }
@@ -328,7 +725,9 @@ type mockAppendable struct {
     exemplars       []mockExemplar
     latestHistogram int64
     histograms      []mockHistogram
-    commitErr       error
+    metadata        []mockMetadata
+
+    commitErr error
 }

 type mockSample struct {
@@ -351,10 +750,17 @@ type mockHistogram struct {
     fh *histogram.FloatHistogram
 }

+type mockMetadata struct {
+    l labels.Labels
+    m metadata.Metadata
+}
+
 // Wrapper to instruct go-cmp package to compare a list of structs with unexported fields.
 func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
+    t.Helper()
+
     testutil.RequireEqualWithOptions(t, expected, actual,
-        []cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{})},
+        []cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{}), cmp.AllowUnexported(mockMetadata{})},
         msgAndArgs...)
 }

@@ -400,13 +806,14 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
     return 0, nil
 }

-func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
-    // TODO: Wire metadata in a mockAppendable field when we get around to handling metadata in remote_write.
-    // UpdateMetadata is no-op for remote write (where mockAppendable is being used to test) for now.
+func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
+    m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
     return 0, nil
 }

 func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
     // AppendCTZeroSample is no-op for remote-write for now.
+    // TODO(bwplotka): Add support for PRW 2.0 for CT zero feature (but also we might
+    // replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218).
     return 0, nil
 }
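requireEqual above wraps testutil.RequireEqualWithOptions so that go-cmp can look inside structs with unexported fields, such as mockSample and the newly added mockMetadata. A minimal, standalone illustration of the underlying go-cmp option (the mockMeta type here is a local stand-in, not the test's mockMetadata):

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

type mockMeta struct {
    help string
    unit string
}

func main() {
    a := mockMeta{help: "counter of things", unit: ""}
    b := mockMeta{help: "counter of things", unit: "seconds"}

    // Without cmp.AllowUnexported(mockMeta{}) this call would panic because
    // of the unexported fields; with it, cmp reports the field-level diff.
    diff := cmp.Diff(a, b, cmp.AllowUnexported(mockMeta{}))
    fmt.Println(diff)
}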
@@ -15,6 +15,7 @@ package remote

 import (
     "bytes"
+    "errors"
     "net/http"
     "net/http/httptest"
     "net/url"
@@ -43,11 +44,12 @@ func testRemoteWriteConfig() *config.RemoteWriteConfig {
                 Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
 }

-func TestNoDuplicateWriteConfigs(t *testing.T) {
+func TestWriteStorageApplyConfig_NoDuplicateWriteConfigs(t *testing.T) {
     dir := t.TempDir()

     cfg1 := config.RemoteWriteConfig{
@@ -58,7 +60,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
                 Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     cfg2 := config.RemoteWriteConfig{
         Name: "write-2",
@@ -68,7 +71,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
                 Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     cfg3 := config.RemoteWriteConfig{
         URL: &common_config.URL{
@@ -77,61 +81,49 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
                 Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }

-    type testcase struct {
-        cfgs []*config.RemoteWriteConfig
-        err  bool
-    }
-
-    cases := []testcase{
+    for _, tc := range []struct {
+        cfgs        []*config.RemoteWriteConfig
+        expectedErr error
+    }{
         { // Two duplicates, we should get an error.
-            cfgs: []*config.RemoteWriteConfig{
-                &cfg1,
-                &cfg1,
-            },
-            err: true,
+            cfgs:        []*config.RemoteWriteConfig{&cfg1, &cfg1},
+            expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
         },
         { // Duplicates but with different names, we should not get an error.
-            cfgs: []*config.RemoteWriteConfig{
-                &cfg1,
-                &cfg2,
-            },
-            err: false,
+            cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg2},
         },
         { // Duplicates but one with no name, we should not get an error.
-            cfgs: []*config.RemoteWriteConfig{
-                &cfg1,
-                &cfg3,
-            },
-            err: false,
+            cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg3},
         },
         { // Duplicates both with no name, we should get an error.
-            cfgs: []*config.RemoteWriteConfig{
-                &cfg3,
-                &cfg3,
-            },
-            err: true,
+            cfgs:        []*config.RemoteWriteConfig{&cfg3, &cfg3},
+            expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
         },
-    }
-
-    for _, tc := range cases {
-        s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
-        conf := &config.Config{
-            GlobalConfig:       config.DefaultGlobalConfig,
-            RemoteWriteConfigs: tc.cfgs,
-        }
-        err := s.ApplyConfig(conf)
-        gotError := err != nil
-        require.Equal(t, tc.err, gotError)
-
-        err = s.Close()
-        require.NoError(t, err)
+    } {
+        t.Run("", func(t *testing.T) {
+            s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
+            conf := &config.Config{
+                GlobalConfig:       config.DefaultGlobalConfig,
+                RemoteWriteConfigs: tc.cfgs,
+            }
+            err := s.ApplyConfig(conf)
+            if tc.expectedErr == nil {
+                require.NoError(t, err)
+            } else {
+                require.Error(t, err)
+                require.Equal(t, tc.expectedErr, err)
+            }

+            require.NoError(t, s.Close())
+        })
     }
 }

-func TestRestartOnNameChange(t *testing.T) {
+func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) {
     dir := t.TempDir()

     cfg := testRemoteWriteConfig()
@@ -139,13 +131,11 @@ func TestRestartOnNameChange(t *testing.T) {
     hash, err := toHash(cfg)
     require.NoError(t, err)

-    s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
+    s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)

     conf := &config.Config{
         GlobalConfig: config.DefaultGlobalConfig,
-        RemoteWriteConfigs: []*config.RemoteWriteConfig{
-            cfg,
-        },
+        RemoteWriteConfigs: []*config.RemoteWriteConfig{cfg},
     }
     require.NoError(t, s.ApplyConfig(conf))
     require.Equal(t, s.queues[hash].client().Name(), cfg.Name)
@@ -157,14 +147,13 @@ func TestRestartOnNameChange(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name)

-    err = s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

-func TestUpdateWithRegisterer(t *testing.T) {
+func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) {
     dir := t.TempDir()

-    s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil)
+    s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
     c1 := &config.RemoteWriteConfig{
         Name: "named",
         URL: &common_config.URL{
@@ -173,7 +162,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
             Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     c2 := &config.RemoteWriteConfig{
         URL: &common_config.URL{
@@ -182,7 +172,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
             Host:   "localhost",
             },
         },
         QueueConfig: config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     conf := &config.Config{
         GlobalConfig: config.DefaultGlobalConfig,
@@ -197,14 +188,13 @@ func TestUpdateWithRegisterer(t *testing.T) {
         require.Equal(t, 10, queue.cfg.MaxShards)
     }

-    err := s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

-func TestWriteStorageLifecycle(t *testing.T) {
+func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) {
     dir := t.TempDir()

-    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
+    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
     conf := &config.Config{
         GlobalConfig: config.DefaultGlobalConfig,
         RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -214,14 +204,13 @@ func TestWriteStorageLifecycle(t *testing.T) {
     require.NoError(t, s.ApplyConfig(conf))
     require.Len(t, s.queues, 1)

-    err := s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

-func TestUpdateExternalLabels(t *testing.T) {
+func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) {
     dir := t.TempDir()

-    s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil)
+    s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)

     externalLabels := labels.FromStrings("external", "true")
     conf := &config.Config{
@@ -243,15 +232,13 @@ func TestUpdateExternalLabels(t *testing.T) {
     require.Len(t, s.queues, 1)
     require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels)

-    err = s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

-func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
+func TestWriteStorageApplyConfig_Idempotent(t *testing.T) {
     dir := t.TempDir()

-    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
+    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)

     conf := &config.Config{
         GlobalConfig: config.GlobalConfig{},
         RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -269,14 +256,13 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
     _, hashExists := s.queues[hash]
     require.True(t, hashExists, "Queue pointer should have remained the same")

-    err = s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

-func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
+func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
     dir := t.TempDir()

-    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
+    s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)

     c0 := &config.RemoteWriteConfig{
         RemoteTimeout: model.Duration(10 * time.Second),
@@ -286,6 +272,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
                 Regex: relabel.MustNewRegexp(".+"),
             },
         },
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     c1 := &config.RemoteWriteConfig{
         RemoteTimeout: model.Duration(20 * time.Second),
@@ -293,10 +280,12 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
         HTTPClientConfig: common_config.HTTPClientConfig{
             BearerToken: "foo",
         },
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }
     c2 := &config.RemoteWriteConfig{
         RemoteTimeout: model.Duration(30 * time.Second),
         QueueConfig:   config.DefaultQueueConfig,
+        ProtobufMessage: config.RemoteWriteProtoMsgV1,
     }

     conf := &config.Config{
@@ -376,8 +365,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
     _, hashExists = s.queues[hashes[2]]
     require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")

-    err = s.Close()
-    require.NoError(t, err)
+    require.NoError(t, s.Close())
 }

 func TestOTLPWriteHandler(t *testing.T) {

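The duplicate-config cases above hinge on one rule: two remote-write entries collide only when both their (possibly empty) name and their URL match, which is why the same URL under different names is accepted. Below is a standalone sketch of that check for orientation; the real WriteStorage hashes the whole config rather than just name and URL.

package main

import (
    "errors"
    "fmt"
)

type remoteWriteConfig struct {
    Name string
    URL  string
}

// validateNoDuplicates rejects configs whose name+URL pair repeats.
func validateNoDuplicates(cfgs []remoteWriteConfig) error {
    seen := map[string]struct{}{}
    for _, c := range cfgs {
        key := c.Name + "|" + c.URL
        if _, ok := seen[key]; ok {
            return errors.New("duplicate remote write configs are not allowed, found duplicate for URL: " + c.URL)
        }
        seen[key] = struct{}{}
    }
    return nil
}

func main() {
    dup := []remoteWriteConfig{{URL: "http://localhost"}, {URL: "http://localhost"}}
    named := []remoteWriteConfig{{Name: "write-1", URL: "http://localhost"}, {Name: "write-2", URL: "http://localhost"}}
    fmt.Println(validateNoDuplicates(dup))   // error
    fmt.Println(validateNoDuplicates(named)) // <nil>
}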
@@ -89,7 +89,7 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) *
     t.Helper()

     dbDir := t.TempDir()
-    rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil)
+    rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
     t.Cleanup(func() {
         require.NoError(t, rs.Close())
     })
@@ -585,7 +585,7 @@ func TestLockfile(t *testing.T) {
     tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
         logger := log.NewNopLogger()
         reg := prometheus.NewRegistry()
-        rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil)
+        rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
         t.Cleanup(func() {
             require.NoError(t, rs.Close())
         })
@@ -605,7 +605,7 @@ func TestLockfile(t *testing.T) {

 func Test_ExistingWAL_NextRef(t *testing.T) {
     dbDir := t.TempDir()
-    rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil)
+    rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
     defer func() {
         require.NoError(t, rs.Close())
     }()
@@ -57,6 +57,7 @@ type WriteTo interface {
     AppendHistograms([]record.RefHistogramSample) bool
     AppendFloatHistograms([]record.RefFloatHistogramSample) bool
     StoreSeries([]record.RefSeries, int)
+    StoreMetadata([]record.RefMetadata)

     // Next two methods are intended for garbage-collection: first we call
     // UpdateSeriesSegment on all current series
@@ -88,6 +89,7 @@ type Watcher struct {
     lastCheckpoint string
     sendExemplars  bool
     sendHistograms bool
+    sendMetadata   bool
     metrics        *WatcherMetrics
     readerMetrics  *LiveReaderMetrics

@@ -170,7 +172,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
 }

 // NewWatcher creates a new WAL watcher for a given WriteTo.
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher {
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher {
     if logger == nil {
         logger = log.NewNopLogger()
     }
@@ -183,6 +185,7 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
         name:           name,
         sendExemplars:  sendExemplars,
         sendHistograms: sendHistograms,
+        sendMetadata:   sendMetadata,

         readNotify: make(chan struct{}),
         quit:       make(chan struct{}),
@@ -541,6 +544,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
         histogramsToSend      []record.RefHistogramSample
         floatHistograms       []record.RefFloatHistogramSample
         floatHistogramsToSend []record.RefFloatHistogramSample
+        metadata              []record.RefMetadata
     )
     for r.Next() && !isClosed(w.quit) {
         rec := r.Record()
@@ -652,6 +656,17 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
                 w.writer.AppendFloatHistograms(floatHistogramsToSend)
                 floatHistogramsToSend = floatHistogramsToSend[:0]
             }

+        case record.Metadata:
+            if !w.sendMetadata || !tail {
+                break
+            }
+            meta, err := dec.Metadata(rec, metadata[:0])
+            if err != nil {
+                w.recordDecodeFailsMetric.Inc()
+                return err
+            }
+            w.writer.StoreMetadata(meta)
         case record.Tombstones:

         default:
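The watcher hunk above forwards the new metadata records only when the sendMetadata flag is set and the watcher is tailing the live segment. Here is a simplified, standalone sketch of that gating; the types are local stand-ins for record.Type and record.RefMetadata, not the real WAL record package.

package main

import "fmt"

type recordType int

const (
    recSamples recordType = iota
    recMetadata
)

type walRecord struct {
    typ  recordType
    data string
}

type watcher struct {
    sendMetadata bool
}

// process dispatches on the record type; metadata records are skipped unless
// the feature flag is on and the watcher is tailing the live segment.
func (w *watcher) process(rec walRecord, tail bool) {
    switch rec.typ {
    case recMetadata:
        if !w.sendMetadata || !tail {
            return
        }
        fmt.Println("forwarding metadata:", rec.data)
    case recSamples:
        fmt.Println("forwarding samples:", rec.data)
    }
}

func main() {
    w := &watcher{sendMetadata: true}
    w.process(walRecord{typ: recMetadata, data: "help/unit/type"}, true)
    w.process(walRecord{typ: recMetadata, data: "replayed"}, false) // skipped
}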
@@ -92,6 +92,8 @@ func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
     wtm.UpdateSeriesSegment(series, index)
 }

+func (wtm *writeToMock) StoreMetadata(_ []record.RefMetadata) { /* no-op */ }
+
 func (wtm *writeToMock) UpdateSeriesSegment(series []record.RefSeries, index int) {
     wtm.seriesLock.Lock()
     defer wtm.seriesLock.Unlock()
@@ -219,7 +221,7 @@ func TestTailSamples(t *testing.T) {
             require.NoError(t, err)

             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true, true)
             watcher.SetStartTime(now)

             // Set the Watcher's metrics so they're not nil pointers.
@@ -304,7 +306,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
             require.NoError(t, err)

             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             go watcher.Start()

             expected := seriesCount
@@ -393,7 +395,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
             require.NoError(t, err)
             readTimeout = time.Second
             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             go watcher.Start()

             expected := seriesCount * 2
@@ -464,7 +466,7 @@ func TestReadCheckpoint(t *testing.T) {
             require.NoError(t, err)

             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             go watcher.Start()

             expectedSeries := seriesCount
@@ -533,7 +535,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
             }

             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             watcher.MaxSegment = -1

             // Set the Watcher's metrics so they're not nil pointers.
@@ -606,7 +608,7 @@ func TestCheckpointSeriesReset(t *testing.T) {

             readTimeout = time.Second
             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             watcher.MaxSegment = -1
             go watcher.Start()

@@ -685,7 +687,7 @@ func TestRun_StartupTime(t *testing.T) {
             require.NoError(t, w.Close())

             wt := newWriteToMock(0)
-            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+            watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
             watcher.MaxSegment = segments

             watcher.setMetrics()
@@ -774,7 +776,7 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
         }()

         wt := newWriteToMock(time.Millisecond)
-        watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+        watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
         watcher.MaxSegment = segments

         watcher.setMetrics()
|
||||||
registerer prometheus.Registerer,
|
registerer prometheus.Registerer,
|
||||||
statsRenderer StatsRenderer,
|
statsRenderer StatsRenderer,
|
||||||
rwEnabled bool,
|
rwEnabled bool,
|
||||||
|
acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
|
||||||
otlpEnabled bool,
|
otlpEnabled bool,
|
||||||
) *API {
|
) *API {
|
||||||
a := &API{
|
a := &API{
|
||||||
|
@ -290,7 +291,7 @@ func NewAPI(
|
||||||
}
|
}
|
||||||
|
|
||||||
if rwEnabled {
|
if rwEnabled {
|
||||||
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap)
|
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
|
||||||
}
|
}
|
||||||
if otlpEnabled {
|
if otlpEnabled {
|
||||||
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
|
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
|
||||||
|
|
|
@ -455,7 +455,7 @@ func TestEndpoints(t *testing.T) {
|
||||||
|
|
||||||
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
|
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}, dbDir, 1*time.Second, nil)
|
}, dbDir, 1*time.Second, nil, false)
|
||||||
|
|
||||||
err = remote.ApplyConfig(&config.Config{
|
err = remote.ApplyConfig(&config.Config{
|
||||||
RemoteReadConfigs: []*config.RemoteReadConfig{
|
RemoteReadConfigs: []*config.RemoteReadConfig{
|
||||||
|
|
|
@ -135,6 +135,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
|
||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
false,
|
false,
|
||||||
|
config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2},
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
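With NewAPI now receiving acceptRemoteWriteProtoMsgs, the receiver has to decide per request which protobuf message to decode. A rough, standalone sketch of that content negotiation based on the Content-Type header follows; the exact media-type parameters and defaults here are illustrative, the normative values are defined by the remote-write 2.0 specification.

package main

import (
    "fmt"
    "mime"
    "strings"
)

// chooseProtoMsg returns which accepted message type a request advertises,
// falling back to the historical v1 message when no proto parameter is set
// (plain application/x-protobuf).
func chooseProtoMsg(contentType string, accepted []string) (string, error) {
    mediaType, params, err := mime.ParseMediaType(contentType)
    if err != nil || mediaType != "application/x-protobuf" {
        return "", fmt.Errorf("unsupported Content-Type: %q", contentType)
    }
    msg := params["proto"]
    if msg == "" {
        msg = "prometheus.WriteRequest" // assumed default for v1 senders
    }
    for _, a := range accepted {
        if strings.EqualFold(a, msg) {
            return msg, nil
        }
    }
    return "", fmt.Errorf("proto message %q not accepted by this server", msg)
}

func main() {
    accepted := []string{"prometheus.WriteRequest", "io.prometheus.write.v2.Request"}
    fmt.Println(chooseProtoMsg("application/x-protobuf;proto=io.prometheus.write.v2.Request", accepted))
    fmt.Println(chooseProtoMsg("application/x-protobuf", accepted))
}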
@@ -265,6 +265,8 @@ type Options struct {
     IsAgent bool
     AppName string

+    AcceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg
+
     Gatherer   prometheus.Gatherer
     Registerer prometheus.Registerer
 }
@@ -353,6 +355,7 @@ func New(logger log.Logger, o *Options) *Handler {
         o.Registerer,
         nil,
         o.EnableRemoteWriteReceiver,
+        o.AcceptRemoteWriteProtoMsgs,
         o.EnableOTLPWriteReceiver,
     )