Merge branch 'sparsehistogram' into beorn7/sparsehistogram
commit 3ce988b031
.github/workflows/buf-lint.yml (vendored, 2 changed lines)

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.5.0
+      - uses: bufbuild/buf-setup-action@v1.6.0
       - uses: bufbuild/buf-lint-action@v1
         with:
          input: 'prompb'
.github/workflows/buf.yml (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.5.0
+      - uses: bufbuild/buf-setup-action@v1.6.0
       - uses: bufbuild/buf-lint-action@v1
         with:
          input: 'prompb'
.github/workflows/ui_build_and_release.yml (vendored, new file, 44 lines)

@@ -0,0 +1,44 @@
+name: ui_build_and_release
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+    tags:
+      - "v0.[0-9]+.[0-9]+*"
+jobs:
+  release:
+    name: release
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Install nodejs
+        uses: actions/setup-node@v3
+        with:
+          node-version-file: "web/ui/.nvmrc"
+      - uses: actions/cache@v3.0.4
+        with:
+          path: ~/.npm
+          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+          restore-keys: |
+            ${{ runner.os }}-node-
+
+      - name: Check libraries version
+        ## This step is verifying that the version of each package is matching the tag
+        if: ${{ github.event_name == 'push' && startsWith(github.ref_name, 'v') }}
+        run: ./scripts/ui_release.sh --check-package "${{ github.ref_name }}"
+      - name: build
+        run: make assets
+      - name: Copy files before publishing libs
+        run: ./scripts/ui_release.sh --copy
+      - name: Publish dry-run libraries
+        if: ${{ github.event_name == 'pull_request' || github.ref_name == 'main' }}
+        run: ./scripts/ui_release.sh --publish dry-run
+      - name: Publish libraries
+        if: ${{ github.event_name == 'push' && startsWith(github.ref_name, 'v') }}
+        run: ./scripts/ui_release.sh --publish
+        env:
+          # The setup-node action writes an .npmrc file with this env variable
+          # as the placeholder for the auth token
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
@@ -35,5 +35,4 @@ crossbuild:
     - illumos
     - linux
     - netbsd
-    - openbsd
     - windows
CHANGELOG.md (26 changed lines)

@@ -1,8 +1,32 @@
 # Changelog
 
+## 2.37.0-rc.0 / 2022-07-05
+
+Following data loss by users due to lack of unified buffer cache in OpenBSD, we
+will no longer release Prometheus upstream for OpenBSD until a proper solution is
+found. #8799
+
+* [FEATURE] Nomad SD: New service discovery for Nomad built-in service discovery. #10915
+* [ENHANCEMENT] Kubernetes SD: Allow attaching node labels for endpoint role. #10759
+* [ENHANCEMENT] PromQL: Optimise creation of signature with/without labels. #10667
+* [ENHANCEMENT] TSDB: Memory optimizations. #10873 #10874
+* [ENHANCEMENT] TSDB: Reduce sleep time when reading WAL. #10859 #10878
+* [BUGFIX] Alerting: Fix Alertmanager targets not being updated when alerts were queued. #10948
+* [BUGFIX] Hetzner SD: Make authentication files relative to Prometheus config file. #10813
+* [BUGFIX] Promtool: Fix `promtool check config` not erroring properly on failures. #10952
+* [BUGFIX] Scrape: Keep relabeled scrape interval and timeout on reloads. #10916
+* [BUGFIX] TSDB: Don't increment `prometheus_tsdb_compactions_failed_total` when context is canceled. #10772
+* [BUGFIX] TSDB: Fix panic if series is not found when deleting series. #10907
+* [BUGFIX] TSDB: Increase `prometheus_tsdb_mmap_chunk_corruptions_total` on out of sequence errors. #10406
+* [BUGFIX] Uyuni SD: Make authentication files relative to Prometheus configuration file and fix default configuration values. #10813
+
 ## 2.36.2 / 2022-06-20
 
 * [BUGFIX] Fix serving of static assets like fonts and favicon. #10888
 
 ## 2.36.1 / 2022-06-09
 
-* [BUGFIX] promtool: Add --lint-fatal option #10840
+* [BUGFIX] promtool: Add --lint-fatal option. #10840
 
 ## 2.36.0 / 2022-05-30
 
Makefile (6 changed lines)

@@ -39,6 +39,12 @@ upgrade-npm-deps:
 	@echo ">> upgrading npm dependencies"
 	./scripts/npm-deps.sh "latest"
 
+.PHONY: ui-bump-version
+ui-bump-version:
+	version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+	cd web/ui && npm install
+	git add "./web/ui/package-lock.json" "./**/package.json"
+
 .PHONY: ui-install
 ui-install:
 	cd $(UI_PATH) && npm install
@@ -144,6 +144,12 @@ Entries in the `CHANGELOG.md` are meant to be in this order:
 * `[ENHANCEMENT]`
 * `[BUGFIX]`
 
+Then bump the UI module version:
+
+```bash
+make ui-bump-version
+```
+
 ### 2. Draft the new release
 
 Tag the new release via the following commands:
@@ -54,7 +54,7 @@ func TestStartupInterrupt(t *testing.T) {
 
 Loop:
 	for x := 0; x < 10; x++ {
-		// error=nil means prometheus has started so we can send the interrupt
+		// error=nil means prometheus has started, so we can send the interrupt
 		// signal and wait for the graceful shutdown.
 		if _, err := http.Get(url); err == nil {
 			startedOk = true
@@ -344,6 +344,7 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
 		ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:", err)
 			hasErrors = true
+			failed = true
 		} else {
 			if len(ruleFiles) > 0 {
@@ -14,13 +14,16 @@
 package main
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"os"
+	"os/exec"
 	"runtime"
 	"strings"
+	"syscall"
 	"testing"
 	"time"
 
@@ -30,6 +33,21 @@ import (
 	"github.com/prometheus/prometheus/model/rulefmt"
 )
 
+var promtoolPath = os.Args[0]
+
+func TestMain(m *testing.M) {
+	for i, arg := range os.Args {
+		if arg == "-test.main" {
+			os.Args = append(os.Args[:i], os.Args[i+1:]...)
+			main()
+			return
+		}
+	}
+
+	exitCode := m.Run()
+	os.Exit(exitCode)
+}
+
 func TestQueryRange(t *testing.T) {
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`)
 	defer s.Close()
@@ -359,3 +377,59 @@ func TestCheckMetricsExtended(t *testing.T) {
 		},
 	}, stats)
 }
+
+func TestExitCodes(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	for _, c := range []struct {
+		file      string
+		exitCode  int
+		lintIssue bool
+	}{
+		{
+			file: "prometheus-config.good.yml",
+		},
+		{
+			file:     "prometheus-config.bad.yml",
+			exitCode: 1,
+		},
+		{
+			file:     "prometheus-config.nonexistent.yml",
+			exitCode: 1,
+		},
+		{
+			file:      "prometheus-config.lint.yml",
+			lintIssue: true,
+			exitCode:  3,
+		},
+	} {
+		t.Run(c.file, func(t *testing.T) {
+			for _, lintFatal := range []bool{true, false} {
+				t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
+					if lintFatal {
+						args = append(args, "--lint-fatal")
+					}
+					tool := exec.Command(promtoolPath, args...)
+					err := tool.Run()
+					if c.exitCode == 0 || (c.lintIssue && !lintFatal) {
+						require.NoError(t, err)
+						return
+					}
+
+					require.Error(t, err)
+
+					var exitError *exec.ExitError
+					if errors.As(err, &exitError) {
+						status := exitError.Sys().(syscall.WaitStatus)
+						require.Equal(t, c.exitCode, status.ExitStatus())
+					} else {
+						t.Errorf("unable to retrieve the exit status for promtool: %v", err)
+					}
+				})
+			}
+		})
+	}
+}
cmd/promtool/testdata/prometheus-config.bad.yml (vendored, new file, 1 line)

@@ -0,0 +1 @@
+not-prometheus:

cmd/promtool/testdata/prometheus-config.good.yml (vendored, new empty file)

cmd/promtool/testdata/prometheus-config.lint.yml (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
+rule_files:
+  - prometheus-rules.lint.yml

cmd/promtool/testdata/prometheus-rules.lint.yml (vendored, new file, 17 lines)

@@ -0,0 +1,17 @@
+groups:
+  - name: example
+    rules:
+      - alert: HighRequestLatency
+        expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
+        for: 10m
+        labels:
+          severity: page
+        annotations:
+          summary: High request latency
+      - alert: HighRequestLatency
+        expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
+        for: 10m
+        labels:
+          severity: page
+        annotations:
+          summary: High request latency
@@ -45,6 +45,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/linode"
 	"github.com/prometheus/prometheus/discovery/marathon"
 	"github.com/prometheus/prometheus/discovery/moby"
+	"github.com/prometheus/prometheus/discovery/nomad"
 	"github.com/prometheus/prometheus/discovery/openstack"
 	"github.com/prometheus/prometheus/discovery/puppetdb"
 	"github.com/prometheus/prometheus/discovery/scaleway"

@@ -513,6 +514,32 @@ var expectedConf = &Config{
 			},
 		},
 	},
+	{
+		JobName: "service-nomad",
+
+		HonorTimestamps: true,
+		ScrapeInterval:  model.Duration(15 * time.Second),
+		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+
+		MetricsPath:      DefaultScrapeConfig.MetricsPath,
+		Scheme:           DefaultScrapeConfig.Scheme,
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+
+		ServiceDiscoveryConfigs: discovery.Configs{
+			&nomad.SDConfig{
+				AllowStale:      true,
+				Namespace:       "default",
+				RefreshInterval: model.Duration(60 * time.Second),
+				Region:          "global",
+				Server:          "http://localhost:4646",
+				TagSeparator:    ",",
+				HTTPClientConfig: config.HTTPClientConfig{
+					FollowRedirects: true,
+					EnableHTTP2:     true,
+				},
+			},
+		},
+	},
 	{
 		JobName: "service-ec2",
 
config/testdata/conf.good.yml (vendored, 4 changed lines)

@@ -214,6 +214,10 @@ scrape_configs:
       cert_file: valid_cert_file
       key_file: valid_key_file
 
+  - job_name: service-nomad
+    nomad_sd_configs:
+      - server: 'http://localhost:4646'
+
   - job_name: service-ec2
     ec2_sd_configs:
       - region: us-east-1
@@ -31,6 +31,7 @@ import (
 	_ "github.com/prometheus/prometheus/discovery/linode"    // register linode
 	_ "github.com/prometheus/prometheus/discovery/marathon"  // register marathon
 	_ "github.com/prometheus/prometheus/discovery/moby"      // register moby
+	_ "github.com/prometheus/prometheus/discovery/nomad"     // register nomad
 	_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
 	_ "github.com/prometheus/prometheus/discovery/puppetdb"  // register puppetdb
 	_ "github.com/prometheus/prometheus/discovery/scaleway"  // register scaleway
@@ -41,9 +41,11 @@ var (
 type Endpoints struct {
 	logger log.Logger
 
-	endpointsInf cache.SharedInformer
-	serviceInf   cache.SharedInformer
-	podInf       cache.SharedInformer
+	endpointsInf     cache.SharedIndexInformer
+	serviceInf       cache.SharedInformer
+	podInf           cache.SharedInformer
+	nodeInf          cache.SharedInformer
+	withNodeMetadata bool
 
 	podStore       cache.Store
 	endpointsStore cache.Store

@@ -53,19 +55,21 @@ type Endpoints struct {
 }
 
 // NewEndpoints returns a new endpoints discovery.
-func NewEndpoints(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoints {
+func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *Endpoints {
 	if l == nil {
 		l = log.NewNopLogger()
 	}
 	e := &Endpoints{
-		logger:         l,
-		endpointsInf:   eps,
-		endpointsStore: eps.GetStore(),
-		serviceInf:     svc,
-		serviceStore:   svc.GetStore(),
-		podInf:         pod,
-		podStore:       pod.GetStore(),
-		queue:          workqueue.NewNamed("endpoints"),
+		logger:           l,
+		endpointsInf:     eps,
+		endpointsStore:   eps.GetStore(),
+		serviceInf:       svc,
+		serviceStore:     svc.GetStore(),
+		podInf:           pod,
+		podStore:         pod.GetStore(),
+		nodeInf:          node,
+		withNodeMetadata: node != nil,
+		queue:            workqueue.NewNamed("endpoints"),
 	}
 
 	e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -118,10 +122,38 @@ func NewEndpoints(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoints {
 			serviceUpdate(o)
 		},
 	})
+	if e.withNodeMetadata {
+		e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+			AddFunc: func(o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+			UpdateFunc: func(_, o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+			DeleteFunc: func(o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+		})
+	}
 
 	return e
 }
 
+func (e *Endpoints) enqueueNode(nodeName string) {
+	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName)
+	if err != nil {
+		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		return
+	}
+
+	for _, endpoint := range endpoints {
+		e.enqueue(endpoint)
+	}
+}
+
 func (e *Endpoints) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {

@@ -135,7 +167,12 @@ func (e *Endpoints) enqueue(obj interface{}) {
 func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	defer e.queue.ShutDown()
 
-	if !cache.WaitForCacheSync(ctx.Done(), e.endpointsInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced) {
+	cacheSyncs := []cache.InformerSynced{e.endpointsInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced}
+	if e.withNodeMetadata {
+		cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced)
+	}
+
+	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
 			level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache")
 		}

@@ -257,6 +294,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 			target[model.LabelName(endpointHostname)] = lv(addr.Hostname)
 		}
 
+		if e.withNodeMetadata {
+			target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+		}
+
 		pod := e.resolvePodRef(addr.TargetRef)
 		if pod == nil {
 			// This target is not a Pod, so don't continue with Pod specific logic.

@@ -387,3 +428,31 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
 
 	tg.Labels = tg.Labels.Merge(serviceLabels(svc))
 }
+
+func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet {
+	if nodeName == nil {
+		return tg
+	}
+
+	obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName)
+	if err != nil {
+		level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err)
+		return tg
+	}
+
+	if !exists {
+		return tg
+	}
+
+	node := obj.(*apiv1.Node)
+	// Allocate one target label for the node name,
+	// and two target labels for each node label.
+	nodeLabelset := make(model.LabelSet, 1+2*len(node.GetLabels()))
+	nodeLabelset[nodeNameLabel] = lv(*nodeName)
+	for k, v := range node.GetLabels() {
+		ln := strutil.SanitizeLabelName(k)
+		nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
+		nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
+	}
+	return tg.Merge(nodeLabelset)
+}
@@ -478,6 +478,126 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 	}.Run(t)
 }
 
+func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
+	metadataConfig := AttachMetadataConfig{Node: true}
+	nodeLabels := map[string]string{"az": "us-east1"}
+	node := makeNode("foobar", "", "", nodeLabels, nil)
+	svc := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testendpoints",
+			Namespace: "default",
+			Labels: map[string]string{
+				"app/name": "test",
+			},
+		},
+	}
+	n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpoints/default/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__":                               "1.2.3.4:9000",
+						"__meta_kubernetes_endpoint_hostname":       "testendpoint1",
+						"__meta_kubernetes_endpoint_node_name":      "foobar",
+						"__meta_kubernetes_endpoint_port_name":      "testport",
+						"__meta_kubernetes_endpoint_port_protocol":  "TCP",
+						"__meta_kubernetes_endpoint_ready":          "true",
+						"__meta_kubernetes_node_label_az":           "us-east1",
+						"__meta_kubernetes_node_labelpresent_az":    "true",
+						"__meta_kubernetes_node_name":               "foobar",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "true",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "false",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_namespace":                      "default",
+					"__meta_kubernetes_endpoints_name":                 "testendpoints",
+					"__meta_kubernetes_service_label_app_name":         "test",
+					"__meta_kubernetes_service_labelpresent_app_name":  "true",
+					"__meta_kubernetes_service_name":                   "testendpoints",
+				},
+				Source: "endpoints/default/testendpoints",
+			},
+		},
+	}.Run(t)
+}
+
+func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
+	nodeLabels := map[string]string{"az": "us-east1"}
+	nodes := makeNode("foobar", "", "", nodeLabels, nil)
+	metadataConfig := AttachMetadataConfig{Node: true}
+	svc := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testendpoints",
+			Namespace: "default",
+			Labels: map[string]string{
+				"app/name": "test",
+			},
+		},
+	}
+	n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+
+	k8sDiscoveryTest{
+		discovery: n,
+		afterStart: func() {
+			nodes.Labels["az"] = "eu-central1"
+			c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+		},
+		expectedMaxItems: 2,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpoints/default/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__":                               "1.2.3.4:9000",
+						"__meta_kubernetes_endpoint_hostname":       "testendpoint1",
+						"__meta_kubernetes_endpoint_node_name":      "foobar",
+						"__meta_kubernetes_endpoint_port_name":      "testport",
+						"__meta_kubernetes_endpoint_port_protocol":  "TCP",
+						"__meta_kubernetes_endpoint_ready":          "true",
+						"__meta_kubernetes_node_label_az":           "eu-central1",
+						"__meta_kubernetes_node_labelpresent_az":    "true",
+						"__meta_kubernetes_node_name":               "foobar",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "true",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "false",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_namespace":                     "default",
+					"__meta_kubernetes_endpoints_name":                "testendpoints",
+					"__meta_kubernetes_service_label_app_name":        "test",
+					"__meta_kubernetes_service_labelpresent_app_name": "true",
+					"__meta_kubernetes_service_name":                  "testendpoints",
+				},
+				Source: "endpoints/default/testendpoints",
+			},
+		},
+	}.Run(t)
+}
+
 func TestEndpointsDiscoveryNamespaces(t *testing.T) {
 	epOne := makeEndpoints()
 	epOne.Namespace = "ns1"
@@ -42,9 +42,11 @@ var (
 type EndpointSlice struct {
 	logger log.Logger
 
-	endpointSliceInf cache.SharedInformer
+	endpointSliceInf cache.SharedIndexInformer
 	serviceInf       cache.SharedInformer
 	podInf           cache.SharedInformer
+	nodeInf          cache.SharedInformer
+	withNodeMetadata bool
 
 	podStore           cache.Store
 	endpointSliceStore cache.Store

@@ -54,7 +56,7 @@ type EndpointSlice struct {
 }
 
 // NewEndpointSlice returns a new endpointslice discovery.
-func NewEndpointSlice(l log.Logger, svc, eps, pod cache.SharedInformer) *EndpointSlice {
+func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer) *EndpointSlice {
 	if l == nil {
 		l = log.NewNopLogger()
 	}

@@ -66,6 +68,8 @@ func NewEndpointSlice(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoin
 		serviceStore:       svc.GetStore(),
 		podInf:             pod,
 		podStore:           pod.GetStore(),
+		nodeInf:            node,
+		withNodeMetadata:   node != nil,
 		queue:              workqueue.NewNamed("endpointSlice"),
 	}
 

@@ -120,9 +124,38 @@ func NewEndpointSlice(l log.Logger, svc, eps, pod cache.SharedInformer) *Endpoin
 		},
 	})
 
+	if e.withNodeMetadata {
+		e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+			AddFunc: func(o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+			UpdateFunc: func(_, o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+			DeleteFunc: func(o interface{}) {
+				node := o.(*apiv1.Node)
+				e.enqueueNode(node.Name)
+			},
+		})
+	}
+
 	return e
 }
 
+func (e *EndpointSlice) enqueueNode(nodeName string) {
+	endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName)
+	if err != nil {
+		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		return
+	}
+
+	for _, endpoint := range endpoints {
+		e.enqueue(endpoint)
+	}
+}
+
 func (e *EndpointSlice) enqueue(obj interface{}) {
 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
 	if err != nil {

@@ -136,7 +169,11 @@ func (e *EndpointSlice) enqueue(obj interface{}) {
 func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	defer e.queue.ShutDown()
 
-	if !cache.WaitForCacheSync(ctx.Done(), e.endpointSliceInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced) {
+	cacheSyncs := []cache.InformerSynced{e.endpointSliceInf.HasSynced, e.serviceInf.HasSynced, e.podInf.HasSynced}
+	if e.withNodeMetadata {
+		cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced)
+	}
+	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if ctx.Err() != context.Canceled {
 			level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
 		}

@@ -282,6 +319,10 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 			target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue
 		}
 
+		if e.withNodeMetadata {
+			target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+		}
+
 		pod := e.resolvePodRef(ep.targetRef())
 		if pod == nil {
 			// This target is not a Pod, so don't continue with Pod specific logic.
@@ -41,6 +41,7 @@ type endpointSlicePortAdaptor interface {
 type endpointSliceEndpointAdaptor interface {
 	addresses() []string
 	hostname() *string
+	nodename() *string
 	conditions() endpointSliceEndpointConditionsAdaptor
 	targetRef() *corev1.ObjectReference
 	topology() map[string]string

@@ -164,6 +165,10 @@ func (e *endpointSliceEndpointAdaptorV1) hostname() *string {
 	return e.endpoint.Hostname
 }
 
+func (e *endpointSliceEndpointAdaptorV1) nodename() *string {
+	return e.endpoint.NodeName
+}
+
 func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor {
 	return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions)
 }

@@ -204,6 +209,10 @@ func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
 	return e.endpoint.Hostname
 }
 
+func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
+	return e.endpoint.NodeName
+}
+
 func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
 	return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
 }
@@ -68,6 +68,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
 				Conditions: v1.EndpointConditions{Ready: boolptr(true)},
 				Hostname:   strptr("testendpoint1"),
 				TargetRef:  &corev1.ObjectReference{},
+				NodeName:   strptr("foobar"),
 				DeprecatedTopology: map[string]string{
 					"topology": "value",
 				},

@@ -688,6 +689,147 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 	}.Run(t)
 }
 
+func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
+	metadataConfig := AttachMetadataConfig{Node: true}
+	nodeLabels := map[string]string{"az": "us-east1"}
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testendpoints",
+			Namespace: "default",
+			Labels: map[string]string{
+				"app/name": "test",
+			},
+		},
+	}
+	objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
+	n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpointslice/default/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "1.2.3.4:9000",
+						"__meta_kubernetes_endpointslice_address_target_kind":                "",
+						"__meta_kubernetes_endpointslice_address_target_name":                "",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":          "true",
+						"__meta_kubernetes_endpointslice_endpoint_hostname":                  "testendpoint1",
+						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
+						"__meta_kubernetes_endpointslice_endpoint_topology_topology":         "value",
+						"__meta_kubernetes_endpointslice_port":                               "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":                  "http",
+						"__meta_kubernetes_endpointslice_port_name":                          "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":                      "TCP",
+						"__meta_kubernetes_node_label_az":                                    "us-east1",
+						"__meta_kubernetes_node_labelpresent_az":                             "true",
+						"__meta_kubernetes_node_name":                                        "foobar",
+					},
+					{
+						"__address__": "2.3.4.5:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":         "http",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+					{
+						"__address__": "3.4.5.6:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":         "http",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpointslice_address_type":    "IPv4",
+					"__meta_kubernetes_endpointslice_name":            "testendpoints",
+					"__meta_kubernetes_namespace":                     "default",
+					"__meta_kubernetes_service_label_app_name":        "test",
+					"__meta_kubernetes_service_labelpresent_app_name": "true",
+					"__meta_kubernetes_service_name":                  "testendpoints",
+				},
+				Source: "endpointslice/default/testendpoints",
+			},
+		},
+	}.Run(t)
+}
+
+func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
+	metadataConfig := AttachMetadataConfig{Node: true}
+	nodeLabels := map[string]string{"az": "us-east1"}
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testendpoints",
+			Namespace: "default",
+			Labels: map[string]string{
+				"app/name": "test",
+			},
+		},
+	}
+	node := makeNode("foobar", "", "", nodeLabels, nil)
+	objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
+	n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 2,
+		afterStart: func() {
+			node.Labels["az"] = "us-central1"
+			c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
+		},
+		expectedRes: map[string]*targetgroup.Group{
+			"endpointslice/default/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "1.2.3.4:9000",
+						"__meta_kubernetes_endpointslice_address_target_kind":                "",
+						"__meta_kubernetes_endpointslice_address_target_name":                "",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready":          "true",
+						"__meta_kubernetes_endpointslice_endpoint_hostname":                  "testendpoint1",
+						"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
+						"__meta_kubernetes_endpointslice_endpoint_topology_topology":         "value",
+						"__meta_kubernetes_endpointslice_port":                               "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":                  "http",
+						"__meta_kubernetes_endpointslice_port_name":                          "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":                      "TCP",
+						"__meta_kubernetes_node_label_az":                                    "us-central1",
+						"__meta_kubernetes_node_labelpresent_az":                             "true",
+						"__meta_kubernetes_node_name":                                        "foobar",
+					},
+					{
+						"__address__": "2.3.4.5:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":         "http",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+					{
+						"__address__": "3.4.5.6:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_app_protocol":         "http",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpointslice_address_type":    "IPv4",
+					"__meta_kubernetes_endpointslice_name":            "testendpoints",
+					"__meta_kubernetes_namespace":                     "default",
+					"__meta_kubernetes_service_label_app_name":        "test",
+					"__meta_kubernetes_service_labelpresent_app_name": "true",
+					"__meta_kubernetes_service_name":                  "testendpoints",
+				},
+				Source: "endpointslice/default/testendpoints",
+			},
+		},
+	}.Run(t)
+}
+
 func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 	epOne := makeEndpointSliceV1()
 	epOne.Namespace = "ns1"
@@ -23,6 +23,8 @@ import (
 	"sync"
 	"time"
 
+	disv1beta1 "k8s.io/api/discovery/v1beta1"
+
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"

@@ -31,7 +33,6 @@ import (
 	"github.com/prometheus/common/version"
 	apiv1 "k8s.io/api/core/v1"
 	disv1 "k8s.io/api/discovery/v1"
-	disv1beta1 "k8s.io/api/discovery/v1beta1"
 	networkv1 "k8s.io/api/networking/v1"
 	"k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -406,7 +407,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	}
 
 	for _, namespace := range namespaces {
-		var informer cache.SharedInformer
+		var informer cache.SharedIndexInformer
 		if v1Supported {
 			e := d.client.DiscoveryV1().EndpointSlices(namespace)
 			elw := &cache.ListWatch{

@@ -421,7 +422,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return e.Watch(ctx, options)
 				},
 			}
-			informer = cache.NewSharedInformer(elw, &disv1.EndpointSlice{}, resyncPeriod)
+			informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{})
 		} else {
 			e := d.client.DiscoveryV1beta1().EndpointSlices(namespace)
 			elw := &cache.ListWatch{

@@ -436,7 +437,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return e.Watch(ctx, options)
 				},
 			}
-			informer = cache.NewSharedInformer(elw, &disv1beta1.EndpointSlice{}, resyncPeriod)
+			informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{})
 		}
 
 		s := d.client.CoreV1().Services(namespace)

@@ -465,11 +466,17 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return p.Watch(ctx, options)
 				},
 			}
+			var nodeInf cache.SharedInformer
+			if d.attachMetadata.Node {
+				nodeInf = d.newNodeInformer(context.Background())
+				go nodeInf.Run(ctx.Done())
+			}
 			eps := NewEndpointSlice(
 				log.With(d.logger, "role", "endpointslice"),
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
 				informer,
+				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
+				nodeInf,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointSliceInf.Run(ctx.Done())

@@ -517,11 +524,18 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return p.Watch(ctx, options)
 				},
 			}
+			var nodeInf cache.SharedInformer
+			if d.attachMetadata.Node {
+				nodeInf = d.newNodeInformer(ctx)
+				go nodeInf.Run(ctx.Done())
+			}
+
 			eps := NewEndpoints(
 				log.With(d.logger, "role", "endpoint"),
+				d.newEndpointsByNodeInformer(elw),
 				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
-				cache.NewSharedInformer(elw, &apiv1.Endpoints{}, resyncPeriod),
 				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
+				nodeInf,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointsInf.Run(ctx.Done())

@@ -735,6 +749,65 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 	return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncPeriod, indexers)
 }
 
+func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
+	indexers := make(map[string]cache.IndexFunc)
+	if !d.attachMetadata.Node {
+		return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
+	}
+
+	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
+		e, ok := obj.(*apiv1.Endpoints)
+		if !ok {
+			return nil, fmt.Errorf("object is not a pod")
+		}
+		var nodes []string
+		for _, target := range e.Subsets {
+			for _, addr := range target.Addresses {
+				if addr.NodeName == nil {
+					continue
+				}
+				nodes = append(nodes, *addr.NodeName)
+			}
+		}
+		return nodes, nil
+	}
+
+	return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
+}
+
+func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
+	indexers := make(map[string]cache.IndexFunc)
+	if !d.attachMetadata.Node {
+		cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncPeriod, indexers)
+	}
+
+	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
+		var nodes []string
+		switch e := obj.(type) {
+		case *disv1.EndpointSlice:
+			for _, target := range e.Endpoints {
+				if target.NodeName == nil {
+					continue
+				}
+				nodes = append(nodes, *target.NodeName)
+			}
+		case *disv1beta1.EndpointSlice:
+			for _, target := range e.Endpoints {
+				if target.NodeName == nil {
+					continue
+				}
+				nodes = append(nodes, *target.NodeName)
+			}
+		default:
+			return nil, fmt.Errorf("object is not an endpointslice")
+		}
+
+		return nodes, nil
+	}
+
+	return cache.NewSharedIndexInformer(plw, object, resyncPeriod, indexers)
+}
+
 func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
 	k8sVer, err := client.Discovery().ServerVersion()
 	if err != nil {
@@ -253,7 +253,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 	tg.Labels = podLabels(pod)
 	tg.Labels[namespaceLabel] = lv(pod.Namespace)
 	if p.withNodeMetadata {
-		p.attachNodeMetadata(tg, pod)
+		tg.Labels = addNodeLabels(tg.Labels, p.nodeInf, p.logger, &pod.Spec.NodeName)
 	}
 
 	containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)

@@ -291,27 +291,6 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 	return tg
 }
 
-func (p *Pod) attachNodeMetadata(tg *targetgroup.Group, pod *apiv1.Pod) {
-	tg.Labels[nodeNameLabel] = lv(pod.Spec.NodeName)
-
-	obj, exists, err := p.nodeInf.GetStore().GetByKey(pod.Spec.NodeName)
-	if err != nil {
-		level.Error(p.logger).Log("msg", "Error getting node", "node", pod.Spec.NodeName, "err", err)
-		return
-	}
-
-	if !exists {
-		return
-	}
-
-	node := obj.(*apiv1.Node)
-	for k, v := range node.GetLabels() {
-		ln := strutil.SanitizeLabelName(k)
-		tg.Labels[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
-		tg.Labels[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
-	}
-}
-
 func (p *Pod) enqueuePodsForNode(nodeName string) {
 	pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
discovery/nomad/nomad.go (new file, 210 lines)

@@ -0,0 +1,210 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nomad
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-kit/log"
+	nomad "github.com/hashicorp/nomad/api"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/refresh"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+const (
+	// nomadLabel is the name for the label containing a target.
+	nomadLabel = model.MetaLabelPrefix + "nomad_"
+	// serviceLabel is the name of the label containing the service name.
+	nomadAddress        = nomadLabel + "address"
+	nomadService        = nomadLabel + "service"
+	nomadNamespace      = nomadLabel + "namespace"
+	nomadNodeID         = nomadLabel + "node_id"
+	nomadDatacenter     = nomadLabel + "dc"
+	nomadServiceAddress = nomadService + "_address"
+	nomadServicePort    = nomadService + "_port"
+	nomadServiceID      = nomadService + "_id"
+	nomadTags           = nomadLabel + "tags"
+)
+
+// DefaultSDConfig is the default nomad SD configuration.
+var (
+	DefaultSDConfig = SDConfig{
+		AllowStale:       true,
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		Namespace:        "default",
+		RefreshInterval:  model.Duration(60 * time.Second),
+		Region:           "global",
+		Server:           "http://localhost:4646",
+		TagSeparator:     ",",
+	}
+
+	failuresCount = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_sd_nomad_failures_total",
+			Help: "Number of nomad service discovery refresh failures.",
+		})
+)
+
+func init() {
+	discovery.RegisterConfig(&SDConfig{})
+	prometheus.MustRegister(failuresCount)
+}
+
+// SDConfig is the configuration for nomad based service discovery.
+type SDConfig struct {
+	AllowStale       bool                    `yaml:"allow_stale"`
+	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+	Namespace        string                  `yaml:"namespace"`
+	RefreshInterval  model.Duration          `yaml:"refresh_interval"`
+	Region           string                  `yaml:"region"`
+	Server           string                  `yaml:"server"`
+	TagSeparator     string                  `yaml:"tag_separator,omitempty"`
+}
+
+// Name returns the name of the Config.
+func (*SDConfig) Name() string { return "nomad" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+	return NewDiscovery(c, opts.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *SDConfig) SetDirectory(dir string) {
+	c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultSDConfig
+	type plain SDConfig
+	err := unmarshal((*plain)(c))
+	if err != nil {
+		return err
+	}
+	if strings.TrimSpace(c.Server) == "" {
+		return errors.New("nomad SD configuration requires a server address")
+	}
+	return c.HTTPClientConfig.Validate()
+}
+
+// Discovery periodically performs nomad requests. It implements
+// the Discoverer interface.
+type Discovery struct {
+	*refresh.Discovery
+	allowStale      bool
+	client          *nomad.Client
+	namespace       string
+	refreshInterval time.Duration
+	region          string
+	server          string
+	tagSeparator    string
+}
+
+// NewDiscovery returns a new Discovery which periodically refreshes its targets.
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+	d := &Discovery{
+		allowStale:      conf.AllowStale,
+		namespace:       conf.Namespace,
+		refreshInterval: time.Duration(conf.RefreshInterval),
+		region:          conf.Region,
+		server:          conf.Server,
+		tagSeparator:    conf.TagSeparator,
+	}
+
+	HTTPClient, err := config.NewClientFromConfig(conf.HTTPClientConfig, "nomad_sd")
+	if err != nil {
+		return nil, err
+	}
+
+	config := nomad.Config{
+		Address:    conf.Server,
+		HttpClient: HTTPClient,
+		Namespace:  conf.Namespace,
+		Region:     conf.Region,
+	}
+
+	client, err := nomad.NewClient(&config)
+	if err != nil {
+		return nil, err
+	}
+	d.client = client
+
+	d.Discovery = refresh.NewDiscovery(
+		logger,
+		"nomad",
+		time.Duration(conf.RefreshInterval),
+		d.refresh,
+	)
+	return d, nil
+}
+
+func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+	opts := &nomad.QueryOptions{
+		AllowStale: d.allowStale,
+	}
+	stubs, _, err := d.client.Services().List(opts)
+	if err != nil {
+		failuresCount.Inc()
+		return nil, err
+	}
+
+	tg := &targetgroup.Group{
+		Source: "Nomad",
+	}
+
+	for _, stub := range stubs {
+		for _, service := range stub.Services {
+			instances, _, err := d.client.Services().Get(service.ServiceName, opts)
+			if err != nil {
+				failuresCount.Inc()
+				return nil, fmt.Errorf("failed to fetch services: %w", err)
+			}
+
+			for _, instance := range instances {
+				labels := model.LabelSet{
+					nomadAddress:        model.LabelValue(instance.Address),
+					nomadDatacenter:     model.LabelValue(instance.Datacenter),
+					nomadNodeID:         model.LabelValue(instance.NodeID),
+					nomadNamespace:      model.LabelValue(instance.Namespace),
+					nomadServiceAddress: model.LabelValue(instance.Address),
+					nomadServiceID:      model.LabelValue(instance.ID),
+					nomadServicePort:    model.LabelValue(strconv.Itoa(instance.Port)),
+					nomadService:        model.LabelValue(instance.ServiceName),
+				}
+				addr := net.JoinHostPort(instance.Address, strconv.FormatInt(int64(instance.Port), 10))
+				labels[model.AddressLabel] = model.LabelValue(addr)
+
+				if len(instance.Tags) > 0 {
+					tags := d.tagSeparator + strings.Join(instance.Tags, d.tagSeparator) + d.tagSeparator
+					labels[nomadTags] = model.LabelValue(tags)
+				}
+
+				tg.Targets = append(tg.Targets, labels)
+			}
+		}
+	}
+	return []*targetgroup.Group{tg}, nil
+}
discovery/nomad/nomad_test.go (new file, 170 lines)

@@ -0,0 +1,170 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nomad
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/go-kit/log"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+)
+
+type NomadSDTestSuite struct {
+	Mock *SDMock
+}
+
+// SDMock is the interface for the nomad mock
+type SDMock struct {
+	t      *testing.T
+	Server *httptest.Server
+	Mux    *http.ServeMux
+}
+
+// NewSDMock returns a new SDMock.
+func NewSDMock(t *testing.T) *SDMock {
+	return &SDMock{
+		t: t,
+	}
+}
+
+// Endpoint returns the URI to the mock server.
+func (m *SDMock) Endpoint() string {
+	return m.Server.URL + "/"
+}
+
+// Setup creates the mock server.
+func (m *SDMock) Setup() {
+	m.Mux = http.NewServeMux()
+	m.Server = httptest.NewServer(m.Mux)
+}
+
+// ShutdownServer creates the mock server.
+func (m *SDMock) ShutdownServer() {
+	m.Server.Close()
+}
+
+func (s *NomadSDTestSuite) TearDownSuite() {
+	s.Mock.ShutdownServer()
+}
+
+func (s *NomadSDTestSuite) SetupTest(t *testing.T) {
+	s.Mock = NewSDMock(t)
+	s.Mock.Setup()
+
+	s.Mock.HandleServicesList()
+	s.Mock.HandleServiceHashiCupsGet()
+}
+
+func (m *SDMock) HandleServicesList() {
+	m.Mux.HandleFunc("/v1/services", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("content-type", "application/json; charset=utf-8")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprint(w, `
+		[
+			{
+				"Namespace": "default",
+				"Services": [
+					{
+						"ServiceName": "hashicups",
+						"Tags": [
+							"metrics"
+						]
+					}
+				]
+			}
+		]`,
+		)
+	})
+}
+
+func (m *SDMock) HandleServiceHashiCupsGet() {
+	m.Mux.HandleFunc("/v1/service/hashicups", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("content-type", "application/json; charset=utf-8")
+		w.WriteHeader(http.StatusOK)
+
+		fmt.Fprint(w, `
+		[
+			{
+				"ID": "_nomad-task-6a1d5f0a-7362-3f5d-9baf-5ed438918e50-group-hashicups-hashicups-hashicups_ui",
+				"ServiceName": "hashicups",
+				"Namespace": "default",
+				"NodeID": "d92fdc3c-9c2b-298a-e8f4-c33f3a449f09",
+				"Datacenter": "dc1",
+				"JobID": "dashboard",
+				"AllocID": "6a1d5f0a-7362-3f5d-9baf-5ed438918e50",
+				"Tags": [
+					"metrics"
+				],
+				"Address": "127.0.0.1",
+				"Port": 30456,
+				"CreateIndex": 226,
+				"ModifyIndex": 226
+			}
+		]`,
+		)
+	})
+}
+
+func TestConfiguredService(t *testing.T) {
+	conf := &SDConfig{
+		Server: "http://localhost:4646",
+	}
+	_, err := NewDiscovery(conf, nil)
+	require.NoError(t, err)
+}
+
+func TestNomadSDRefresh(t *testing.T) {
+	sdmock := &NomadSDTestSuite{}
+	sdmock.SetupTest(t)
+	t.Cleanup(sdmock.TearDownSuite)
+
+	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
+	require.NoError(t, err)
+
+	cfg := DefaultSDConfig
+	cfg.Server = endpoint.String()
+	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+
+	tgs, err := d.refresh(context.Background())
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(tgs))
+
+	tg := tgs[0]
+	require.NotNil(t, tg)
+	require.NotNil(t, tg.Targets)
+	require.Equal(t, 1, len(tg.Targets))
+
+	lbls := model.LabelSet{
+		"__address__":                  model.LabelValue("127.0.0.1:30456"),
+		"__meta_nomad_address":         model.LabelValue("127.0.0.1"),
+		"__meta_nomad_dc":              model.LabelValue("dc1"),
+		"__meta_nomad_namespace":       model.LabelValue("default"),
+		"__meta_nomad_node_id":         model.LabelValue("d92fdc3c-9c2b-298a-e8f4-c33f3a449f09"),
+		"__meta_nomad_service":         model.LabelValue("hashicups"),
+		"__meta_nomad_service_address": model.LabelValue("127.0.0.1"),
+		"__meta_nomad_service_id":      model.LabelValue("_nomad-task-6a1d5f0a-7362-3f5d-9baf-5ed438918e50-group-hashicups-hashicups-hashicups_ui"),
+		"__meta_nomad_service_port":    model.LabelValue("30456"),
+		"__meta_nomad_tags":            model.LabelValue(",metrics,"),
+	}
+	require.Equal(t, lbls, tg.Targets[0])
+}
@@ -285,6 +285,10 @@ marathon_sd_configs:
 nerve_sd_configs:
   [ - <nerve_sd_config> ... ]
 
+# List of Nomad service discovery configurations.
+nomad_sd_configs:
+  [ - <nomad_sd_config> ... ]
+
 # List of OpenStack service discovery configurations.
 openstack_sd_configs:
   [ - <openstack_sd_config> ... ]
@@ -1686,7 +1690,7 @@ Available meta labels:
 * `__meta_kubernetes_pod_name`: The name of the pod object.
 * `__meta_kubernetes_pod_ip`: The pod IP of the pod object.
 * `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true`for each label from the pod object.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object.
 * `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the pod object.
 * `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
 * `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
@@ -1713,6 +1717,8 @@ Available meta labels:
 
 * `__meta_kubernetes_namespace`: The namespace of the endpoints object.
 * `__meta_kubernetes_endpoints_name`: The names of the endpoints object.
+* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the endpoints object.
+* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label from the endpoints object.
 * For all targets discovered directly from the endpoints list (those not additionally inferred
   from underlying pods), the following labels are attached:
   * `__meta_kubernetes_endpoint_hostname`: Hostname of the endpoint.
@@ -1851,7 +1857,7 @@ namespaces:
 
 # Optional metadata to attach to discovered targets. If omitted, no additional metadata is attached.
 attach_metadata:
-# Attaches node metadata to discovered targets. Only valid for role: pod.
+# Attaches node metadata to discovered targets. Valid for roles: pod, endpoints, endpointslice.
 # When set to true, Prometheus must have permissions to get Nodes.
 [ node: <boolean> | default = false ]
 ```
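As a sketch of how the extended option reads in practice (the job name is illustrative, not taken from this diff), a scrape config attaching node metadata to endpoints targets could look like:

```yaml
scrape_configs:
  - job_name: 'kubernetes-endpoints'
    kubernetes_sd_configs:
      - role: endpoints
        # With node: true, Prometheus also needs permission to get Nodes.
        attach_metadata:
          node: true
```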
@@ -2174,6 +2180,73 @@ paths:
  - <string>
[ timeout: <duration> | default = 10s ]
```

### `<nomad_sd_config>`

Nomad SD configurations allow retrieving scrape targets from [Nomad's](https://www.nomadproject.io/)
Service API.

The following meta labels are available on targets during [relabeling](#relabel_config):

* `__meta_nomad_address`: the service address of the target
* `__meta_nomad_dc`: the datacenter name for the target
* `__meta_nomad_namespace`: the namespace of the target
* `__meta_nomad_node_id`: the node name defined for the target
* `__meta_nomad_service`: the name of the service the target belongs to
* `__meta_nomad_service_address`: the service address of the target
* `__meta_nomad_service_id`: the service ID of the target
* `__meta_nomad_service_port`: the service port of the target
* `__meta_nomad_tags`: the list of tags of the target joined by the tag separator

```yaml
# The information to access the Nomad API. It is to be defined
# as the Nomad documentation requires.
[ allow_stale: <boolean> | default = true ]
[ datacenter: <string> ]
[ namespace: <string> | default = default ]
[ refresh_interval: <duration> | default = 60s ]
[ region: <string> | default = global ]
[ server: <host> ]
[ tag_separator: <string> | default = , ]

# Authentication information used to authenticate to the nomad server.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `password` and `password_file` are mutually exclusive.

# Optional HTTP basic authentication information.
basic_auth:
  [ username: <string> ]
  [ password: <secret> ]
  [ password_file: <string> ]

# Optional `Authorization` header configuration.
authorization:
  # Sets the authentication type.
  [ type: <string> | default: Bearer ]
  # Sets the credentials. It is mutually exclusive with
  # `credentials_file`.
  [ credentials: <secret> ]
  # Sets the credentials to the credentials read from the configured file.
  # It is mutually exclusive with `credentials`.
  [ credentials_file: <filename> ]

# Optional OAuth 2.0 configuration.
oauth2:
  [ <oauth2> ]

# Optional proxy URL.
[ proxy_url: <string> ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
[ enable_http2: <bool> | default: true ]

# TLS configuration.
tls_config:
  [ <tls_config> ]
```

### `<serverset_sd_config>`

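To make the meta labels concrete, here is a minimal sketch (editorial, not part of the diff) that keeps only Nomad services tagged `metrics`, relying on the default `,` tag separator documented above:

```yaml
scrape_configs:
  - job_name: "nomad"
    nomad_sd_configs:
      - server: "http://localhost:4646"
    relabel_configs:
      # __meta_nomad_tags looks like ",metrics,http,": match the tag between separators.
      - source_labels: [__meta_nomad_tags]
        regex: ".*,metrics,.*"
        action: keep
```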
@@ -2866,6 +2939,10 @@ marathon_sd_configs:
nerve_sd_configs:
  [ - <nerve_sd_config> ... ]

# List of Nomad service discovery configurations.
nomad_sd_configs:
  [ - <nomad_sd_config> ... ]

# List of OpenStack service discovery configurations.
openstack_sd_configs:
  [ - <openstack_sd_config> ... ]
@@ -54,6 +54,9 @@ tls_server_config:
# Go default cipher suites are used. Available cipher suites are documented
# in the go documentation:
# https://golang.org/pkg/crypto/tls/#pkg-constants
#
# Note that only the cipher suites returned by the following function are supported:
# https://pkg.go.dev/crypto/tls#CipherSuites
[ cipher_suites:
  [ - <string> ] ]

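A short sketch of the option in context (editorial; the certificate paths are placeholders, and the two suite names are ones that Go's `tls.CipherSuites()` does return):

```yaml
tls_server_config:
  cert_file: server.crt
  key_file: server.key
  # Restrict TLS 1.2 connections to these cipher suites.
  cipher_suites:
    - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
```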
@@ -114,6 +114,37 @@ Operations between vectors attempt to find a matching element in the right-hand
vector for each entry in the left-hand side. There are two basic types of
matching behavior: One-to-one and many-to-one/one-to-many.

### Vector matching keywords

These vector matching keywords allow for matching between series with different
label sets:

* `on`
* `ignoring`

Label lists provided to matching keywords will determine how vectors are combined. Examples
can be found in [One-to-one vector matches](#one-to-one-vector-matches) and in
[Many-to-one and one-to-many vector matches](#many-to-one-and-one-to-many-vector-matches).

### Group modifiers

These group modifiers enable many-to-one/one-to-many vector matching:

* `group_left`
* `group_right`

Label lists can be provided to the group modifiers; they contain labels from the "one"-side to
be included in the result metrics.

_Many-to-one and one-to-many matching are advanced use cases that should be carefully considered.
Often a proper use of `ignoring(<labels>)` provides the desired outcome._

_Grouping modifiers can only be used for
[comparison](#comparison-binary-operators) and
[arithmetic](#arithmetic-binary-operators) operations. The `and`, `unless` and
`or` operations match with all possible entries in the right vector by
default._

### One-to-one vector matches

**One-to-one** finds a unique pair of entries from each side of the operation.
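As a concrete sketch using the example metrics that appear later on this page: with left-hand series labeled by `method` and `code` and right-hand series labeled only by `method`, the following two one-to-one expressions select the same pairs, since `on` names the labels to match on while `ignoring` names the labels to drop:

    method_code:http_errors:rate5m{code="500"} / on(method) method:http_requests:rate5m

    method_code:http_errors:rate5m{code="500"} / ignoring(code) method:http_requests:rate5m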
@@ -153,7 +184,7 @@ The entries with methods `put` and `del` have no match and will not show up in t

**Many-to-one** and **one-to-many** matchings refer to the case where each vector element on
the "one"-side can match with multiple elements on the "many"-side. This has to
be explicitly requested using the `group_left` or `group_right` modifier, where
be explicitly requested using the `group_left` or `group_right` [modifiers](#group-modifiers), where
left/right determines which vector has the higher cardinality.

    <vector expr> <bin-op> ignoring(<label list>) group_left(<label list>) <vector expr>
@@ -161,17 +192,11 @@ left/right determines which vector has the higher cardinality.
    <vector expr> <bin-op> on(<label list>) group_left(<label list>) <vector expr>
    <vector expr> <bin-op> on(<label list>) group_right(<label list>) <vector expr>

The label list provided with the group modifier contains additional labels from
The label list provided with the [group modifier](#group-modifiers) contains additional labels from
the "one"-side to be included in the result metrics. For `on` a label can only
appear in one of the lists. Every time series of the result vector must be
uniquely identifiable.

_Grouping modifiers can only be used for
[comparison](#comparison-binary-operators) and
[arithmetic](#arithmetic-binary-operators). Operations as `and`, `unless` and
`or` operations match with all possible entries in the right vector by
default._

Example query:

    method_code:http_errors:rate5m / ignoring(code) group_left method:http_requests:rate5m
@@ -186,8 +211,6 @@ left:
{method="post", code="500"} 0.05 // 6 / 120
{method="post", code="404"} 0.175 // 21 / 120

_Many-to-one and one-to-many matching are advanced use cases that should be carefully considered.
Often a proper use of `ignoring(<labels>)` provides the desired outcome._

## Aggregation operators

documentation/examples/prometheus-nomad.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
# An example scrape configuration for running Prometheus with
# Nomad built-in service discovery.
#
# The following config can be used to monitor services running on
# a Nomad agent that is started using the getting started tutorial [1]
#
# sudo nomad agent -dev -bind 0.0.0.0 -log-level INFO
#
# [1] https://learn.hashicorp.com/tutorials/nomad/get-started-run?in=nomad/get-started

scrape_configs:
  # Make Prometheus scrape itself for metrics.
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]

  # Discover Nomad services to scrape.
  - job_name: 'nomad_sd'
    nomad_sd_configs:
      - server: 'http://localhost:4646'
    relabel_configs:
      - source_labels: [__meta_nomad_service]
        target_label: job
go.mod (65 changes)
@@ -7,14 +7,14 @@ require (
	github.com/Azure/go-autorest/autorest v0.11.27
	github.com/Azure/go-autorest/autorest/adal v0.9.20
	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
	github.com/aws/aws-sdk-go v1.44.29
	github.com/aws/aws-sdk-go v1.44.47
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/dennwc/varint v1.0.0
	github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
	github.com/digitalocean/godo v1.80.0
	github.com/docker/docker v20.10.16+incompatible
	github.com/digitalocean/godo v1.81.0
	github.com/docker/docker v20.10.17+incompatible
	github.com/edsrzf/mmap-go v1.1.0
	github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1
	github.com/envoyproxy/go-control-plane v0.10.3
	github.com/envoyproxy/protoc-gen-validate v0.6.7
	github.com/fsnotify/fsnotify v1.5.4
	github.com/go-kit/log v0.2.1
@@ -23,17 +23,18 @@ require (
	github.com/go-zookeeper/zk v1.0.2
	github.com/gogo/protobuf v1.3.2
	github.com/golang/snappy v0.0.4
	github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1
	github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3
	github.com/gophercloud/gophercloud v0.25.0
	github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2
	github.com/grpc-ecosystem/grpc-gateway v1.16.0
	github.com/hashicorp/consul/api v1.13.0
	github.com/hetznercloud/hcloud-go v1.33.2
	github.com/ionos-cloud/sdk-go/v6 v6.0.5851
	github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec
	github.com/hetznercloud/hcloud-go v1.35.0
	github.com/ionos-cloud/sdk-go/v6 v6.1.0
	github.com/json-iterator/go v1.1.12
	github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
	github.com/linode/linodego v1.6.0
	github.com/miekg/dns v1.1.49
	github.com/linode/linodego v1.8.0
	github.com/miekg/dns v1.1.50
	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
	github.com/oklog/run v1.1.0
	github.com/oklog/ulid v1.3.1
@@ -41,14 +42,14 @@ require (
	github.com/prometheus/alertmanager v0.24.0
	github.com/prometheus/client_golang v1.12.2
	github.com/prometheus/client_model v0.2.0
	github.com/prometheus/common v0.34.0
	github.com/prometheus/common/assets v0.1.0
	github.com/prometheus/common v0.35.0
	github.com/prometheus/common/assets v0.2.0
	github.com/prometheus/common/sigv4 v0.1.0
	github.com/prometheus/exporter-toolkit v0.7.1
	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
	github.com/stretchr/testify v1.7.2
	github.com/vultr/govultr/v2 v2.17.1
	github.com/stretchr/testify v1.8.0
	github.com/vultr/govultr/v2 v2.17.2
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0
	go.opentelemetry.io/otel v1.7.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0
@@ -59,28 +60,28 @@ require (
	go.uber.org/atomic v1.9.0
	go.uber.org/automaxprocs v1.5.1
	go.uber.org/goleak v1.1.12
	golang.org/x/net v0.0.0-20220607020251-c690dde0001d
	golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401
	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
	golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26
	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
	golang.org/x/time v0.0.0-20220224211638-0e9765cccd65
	golang.org/x/tools v0.1.10
	google.golang.org/api v0.83.0
	google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8
	golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b
	golang.org/x/time v0.0.0-20220609170525-579cf78fd858
	golang.org/x/tools v0.1.11
	google.golang.org/api v0.86.0
	google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
	google.golang.org/grpc v1.47.0
	google.golang.org/protobuf v1.28.0
	gopkg.in/alecthomas/kingpin.v2 v2.2.6
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.24.1
	k8s.io/apimachinery v0.24.1
	k8s.io/client-go v0.24.1
	k8s.io/api v0.24.2
	k8s.io/apimachinery v0.24.2
	k8s.io/client-go v0.24.2
	k8s.io/klog v1.0.0
	k8s.io/klog/v2 v2.60.1
	k8s.io/klog/v2 v2.70.0
)

require (
	cloud.google.com/go/compute v1.6.1 // indirect
	cloud.google.com/go/compute v1.7.0 // indirect
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
@@ -95,7 +96,7 @@ require (
	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
	github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect
	github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/docker/distribution v2.7.1+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
@@ -126,9 +127,12 @@ require (
	github.com/google/go-cmp v0.5.8 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.2.0 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
	github.com/gorilla/websocket v1.4.2 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 // indirect
	github.com/hashicorp/cronexpr v1.1.1 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-hclog v0.12.2 // indirect
	github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
@@ -161,20 +165,19 @@ require (
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/sirupsen/logrus v1.8.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stretchr/objx v0.2.0 // indirect
	go.mongodb.org/mongo-driver v1.8.3 // indirect
	go.opencensus.io v0.23.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect
	go.opentelemetry.io/otel/metric v0.30.0 // indirect
	go.opentelemetry.io/proto/otlp v0.16.0 // indirect
	golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
	golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect
	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
	google.golang.org/appengine v1.6.7 // indirect
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/ini.v1 v1.66.4 // indirect
	gotest.tools/v3 v3.0.3 // indirect
	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
go.sum (155 changes)
@@ -25,8 +25,9 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go v0.102.0 h1:DAq3r8y4mDgyB/ZPJ9v/5VJNqjgJAxTn6ZYLlUywOu8=
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -37,10 +38,12 @@ cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTB
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -50,6 +53,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -120,8 +124,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.29 h1:53YWlelsMiYmGxuTRpAq7Xp+pE+0esAVqNFiNyekU+A=
github.com/aws/aws-sdk-go v1.44.29/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.47 h1:uyiNvoR4wfZ8Bp4ghgbyzGFIg5knjZMUAd5S9ba9qNU=
github.com/aws/aws-sdk-go v1.44.47/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -135,6 +139,7 @@ github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -158,8 +163,9 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk=
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -177,16 +183,17 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.80.0 h1:ZULJ/fWDM97YtO7Fa+K6hzJLd7+smCu4N+0n+B/xtj4=
github.com/digitalocean/godo v1.80.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew=
github.com/digitalocean/godo v1.81.0 h1:sjb3fOfPfSlUQUK22E87BcI8Zx2qtnF7VUCCO4UK3C8=
github.com/digitalocean/godo v1.81.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.16+incompatible h1:2Db6ZR/+FUR3hqPMwnogOPHFn405crbpxvWzKovETOQ=
github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -210,8 +217,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
@@ -420,14 +428,17 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 h1:K4bn56FHdjFCfjSo3wWaD6rJL8r9yvmmncJNMhdkKrw=
github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 h1:mpL/HvfIgIejhVwAfxBQkwEjlhP5o0O9RAeTAjpwzxc=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -436,6 +447,7 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4=
github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -444,6 +456,7 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k=
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
@@ -462,6 +475,8 @@ github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -507,11 +522,13 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec h1:jAF71e0KoaY2LJlRsRxxGz6MNQOG5gTBIc+rklxfNO0=
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec/go.mod h1:jP79oXjopTyH6E8LF0CEMq67STgrlmBRIyijA0tuR5o=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hetznercloud/hcloud-go v1.33.2 h1:ptWKVYLW7YtjXzsqTFKFxwpVo3iM9UMkVPBYQE4teLU=
github.com/hetznercloud/hcloud-go v1.33.2/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
github.com/hetznercloud/hcloud-go v1.35.0 h1:sduXOrWM0/sJXwBty7EQd7+RXEJh5+CsAGQmHshChFg=
github.com/hetznercloud/hcloud-go v1.35.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
@@ -523,8 +540,8 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.0.5851 h1:Xjdta3uR5SDLXXl0oahgVIJ+AQNFCyOCuAwxPAXFUCM=
github.com/ionos-cloud/sdk-go/v6 v6.0.5851/go.mod h1:UE3V/2DjnqD5doOqtjYqzJRMpI1RiwrvuuSEPX1pdnk=
github.com/ionos-cloud/sdk-go/v6 v6.1.0 h1:0EZz5H+t6W23zHt6dgHYkKavr72/30O9nA97E3FZaS4=
github.com/ionos-cloud/sdk-go/v6 v6.1.0/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -566,8 +583,9 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -576,8 +594,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.6.0 h1:y3KgXttj0v6V3HyGtsvdkTl0gIzaAAOdrDXCIwGeh2g=
github.com/linode/linodego v1.6.0/go.mod h1:9lmhBsOupR6ke7D9Ioj1bq/ny9pfgFkCLiX7ubq0r08=
github.com/linode/linodego v1.8.0 h1:7B2UaWu6C48tZZZrtINWRElAcwzk4TLnL9USjKf3xm0=
github.com/linode/linodego v1.8.0/go.mod h1:heqhl91D8QTPVm2k9qZHP78zzbOdTFLXE9NJc3bcc50=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -609,15 +627,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.49 h1:qe0mQU3Z/XpFeE+AEBo2rqaS1IPBJ3anmqZ4XiZJVG8=
github.com/miekg/dns v1.1.49/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
@@ -742,10 +761,10 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE=
github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
github.com/prometheus/common/assets v0.1.0 h1:8WlWPDRjbfff4FWCBjaUF0NEIgDD2Mv2anoKfwG+Ums=
github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y=
@@ -764,6 +783,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -809,8 +830,8 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -818,16 +839,17 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vultr/govultr/v2 v2.17.1 h1:UBmotwA0mkGtyJMakUF9jhLH/W3mN5wfGRn543i/BCA=
github.com/vultr/govultr/v2 v2.17.1/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
@@ -875,6 +897,7 @@ go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe3
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E=
go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -908,8 +931,9 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -948,8 +972,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1008,8 +1032,9 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1029,8 +1054,10 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw=
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI=
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1137,8 +1164,12 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
@@ -1158,8 +1189,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1227,15 +1258,16 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1272,8 +1304,10 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.83.0 h1:pMvST+6v+46Gabac4zlJlalxZjCeRcepwg2EdBU+nCc=
google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U=
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1322,6 +1356,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
@@ -1350,15 +1385,21 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM=
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1426,6 +1467,8 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
|
|||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
|
@ -1458,12 +1501,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY=
|
||||
k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ=
|
||||
k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I=
|
||||
k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E=
|
||||
k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8=
|
||||
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
|
||||
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
|
||||
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
|
||||
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||
|
|
|
@ -15,11 +15,10 @@ package labels

import (
    "bufio"
    "fmt"
    "os"
    "sort"
    "strings"

    "github.com/pkg/errors"
)

// Slice is a sortable slice of label sets.

@ -81,7 +80,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
    }

    if i != n {
        return mets, errors.Errorf("requested %d metrics but found %d", n, i)
        return mets, fmt.Errorf("requested %d metrics but found %d", n, i)
    }
    return mets, nil
}
@ -19,7 +19,6 @@ import (
    "strings"

    "github.com/grafana/regexp"
    "github.com/pkg/errors"
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/model/labels"

@ -71,7 +70,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
        *a = act
        return nil
    }
    return errors.Errorf("unknown relabel action %q", s)
    return fmt.Errorf("unknown relabel action %q", s)
}

// Config is the configuration for relabeling of target label sets.

@ -105,25 +104,25 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
        c.Regex = MustNewRegexp("")
    }
    if c.Action == "" {
        return errors.Errorf("relabel action cannot be empty")
        return fmt.Errorf("relabel action cannot be empty")
    }
    if c.Modulus == 0 && c.Action == HashMod {
        return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
        return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
    }
    if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" {
        return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
        return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
    }
    if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) {
        return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
        return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
    }
    if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement {
        return errors.Errorf("'replacement' can not be set for %s action", c.Action)
        return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
    }
    if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
        return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
        return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
    }
    if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
        return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
        return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
    }

    if c.Action == LabelDrop || c.Action == LabelKeep {

@ -132,7 +131,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
            c.Modulus != DefaultRelabelConfig.Modulus ||
            c.Separator != DefaultRelabelConfig.Separator ||
            c.Replacement != DefaultRelabelConfig.Replacement {
            return errors.Errorf("%s action requires only 'regex', and no other fields", c.Action)
            return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action)
        }
    }

@ -265,7 +264,7 @@ func relabel(lset labels.Labels, cfg *Config) labels.Labels {
            }
        }
    default:
        panic(errors.Errorf("relabel: unknown relabel action type %q", cfg.Action))
        panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action))
    }

    return lb.Labels()
@ -16,12 +16,13 @@ package rulefmt

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/prometheus/common/model"
    yaml "gopkg.in/yaml.v3"

@ -40,12 +41,21 @@ type Error struct {

// Error prints the error message in a formatted string.
func (err *Error) Error() string {
    if err.Err.nodeAlt != nil {
        return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error()
    } else if err.Err.node != nil {
        return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error()
    if err.Err.err == nil {
        return ""
    }
    return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error()
    if err.Err.nodeAlt != nil {
        return fmt.Sprintf("%d:%d: %d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName, err.Err.err)
    }
    if err.Err.node != nil {
        return fmt.Sprintf("%d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName, err.Err.err)
    }
    return fmt.Sprintf("group %q, rule %d, %q: %v", err.Group, err.Rule, err.RuleName, err.Err.err)
}

// Unwrap unpacks wrapped error for use in errors.Is & errors.As.
func (err *Error) Unwrap() error {
    return &err.Err
}

// WrappedError wraps error with the yaml node which can be used to represent

@ -58,14 +68,23 @@ type WrappedError struct {

// Error prints the error message in a formatted string.
func (we *WrappedError) Error() string {
    if we.err == nil {
        return ""
    }
    if we.nodeAlt != nil {
        return errors.Wrapf(we.err, "%d:%d: %d:%d", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column).Error()
    } else if we.node != nil {
        return errors.Wrapf(we.err, "%d:%d", we.node.Line, we.node.Column).Error()
        return fmt.Sprintf("%d:%d: %d:%d: %v", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column, we.err)
    }
    if we.node != nil {
        return fmt.Sprintf("%d:%d: %v", we.node.Line, we.node.Column, we.err)
    }
    return we.err.Error()
}

// Unwrap unpacks wrapped error for use in errors.Is & errors.As.
func (we *WrappedError) Unwrap() error {
    return we.err
}

// RuleGroups is a set of rule groups that are typically exposed in a file.
type RuleGroups struct {
    Groups []RuleGroup `yaml:"groups"`

@ -81,13 +100,13 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {

    for j, g := range g.Groups {
        if g.Name == "" {
            errs = append(errs, errors.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column))
            errs = append(errs, fmt.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column))
        }

        if _, ok := set[g.Name]; ok {
            errs = append(
                errs,
                errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
                fmt.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
            )
        }

@ -146,7 +165,7 @@ type RuleNode struct {
func (r *RuleNode) Validate() (nodes []WrappedError) {
    if r.Record.Value != "" && r.Alert.Value != "" {
        nodes = append(nodes, WrappedError{
            err:     errors.Errorf("only one of 'record' and 'alert' must be set"),
            err:     fmt.Errorf("only one of 'record' and 'alert' must be set"),
            node:    &r.Record,
            nodeAlt: &r.Alert,
        })

@ -154,12 +173,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
    if r.Record.Value == "" && r.Alert.Value == "" {
        if r.Record.Value == "0" {
            nodes = append(nodes, WrappedError{
                err:  errors.Errorf("one of 'record' or 'alert' must be set"),
                err:  fmt.Errorf("one of 'record' or 'alert' must be set"),
                node: &r.Alert,
            })
        } else {
            nodes = append(nodes, WrappedError{
                err:  errors.Errorf("one of 'record' or 'alert' must be set"),
                err:  fmt.Errorf("one of 'record' or 'alert' must be set"),
                node: &r.Record,
            })
        }

@ -167,31 +186,31 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {

    if r.Expr.Value == "" {
        nodes = append(nodes, WrappedError{
            err:  errors.Errorf("field 'expr' must be set in rule"),
            err:  fmt.Errorf("field 'expr' must be set in rule"),
            node: &r.Expr,
        })
    } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
        nodes = append(nodes, WrappedError{
            err:  errors.Wrapf(err, "could not parse expression"),
            err:  fmt.Errorf("could not parse expression: %w", err),
            node: &r.Expr,
        })
    }
    if r.Record.Value != "" {
        if len(r.Annotations) > 0 {
            nodes = append(nodes, WrappedError{
                err:  errors.Errorf("invalid field 'annotations' in recording rule"),
                err:  fmt.Errorf("invalid field 'annotations' in recording rule"),
                node: &r.Record,
            })
        }
        if r.For != 0 {
            nodes = append(nodes, WrappedError{
                err:  errors.Errorf("invalid field 'for' in recording rule"),
                err:  fmt.Errorf("invalid field 'for' in recording rule"),
                node: &r.Record,
            })
        }
        if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
            nodes = append(nodes, WrappedError{
                err:  errors.Errorf("invalid recording rule name: %s", r.Record.Value),
                err:  fmt.Errorf("invalid recording rule name: %s", r.Record.Value),
                node: &r.Record,
            })
        }

@ -200,13 +219,13 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
    for k, v := range r.Labels {
        if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
            nodes = append(nodes, WrappedError{
                err: errors.Errorf("invalid label name: %s", k),
                err: fmt.Errorf("invalid label name: %s", k),
            })
        }

        if !model.LabelValue(v).IsValid() {
            nodes = append(nodes, WrappedError{
                err: errors.Errorf("invalid label value: %s", v),
                err: fmt.Errorf("invalid label value: %s", v),
            })
        }
    }

@ -214,7 +233,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
    for k := range r.Annotations {
        if !model.LabelName(k).IsValid() {
            nodes = append(nodes, WrappedError{
                err: errors.Errorf("invalid annotation name: %s", k),
                err: fmt.Errorf("invalid annotation name: %s", k),
            })
        }
    }

@ -260,7 +279,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
    for k, val := range rl.Labels {
        err := parseTest(val)
        if err != nil {
            errs = append(errs, errors.Wrapf(err, "label %q", k))
            errs = append(errs, fmt.Errorf("label %q: %w", k, err))
        }
    }

@ -268,7 +287,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
    for k, val := range rl.Annotations {
        err := parseTest(val)
        if err != nil {
            errs = append(errs, errors.Wrapf(err, "annotation %q", k))
            errs = append(errs, fmt.Errorf("annotation %q: %w", k, err))
        }
    }

@ -287,7 +306,7 @@ func Parse(content []byte) (*RuleGroups, []error) {
    decoder.KnownFields(true)
    err := decoder.Decode(&groups)
    // Ignore io.EOF which happens with empty input.
    if err != nil && err != io.EOF {
    if err != nil && !errors.Is(err, io.EOF) {
        errs = append(errs, err)
    }
    err = yaml.Unmarshal(content, &node)

@ -306,11 +325,11 @@ func Parse(content []byte) (*RuleGroups, []error) {
func ParseFile(file string) (*RuleGroups, []error) {
    b, err := os.ReadFile(file)
    if err != nil {
        return nil, []error{errors.Wrap(err, file)}
        return nil, []error{fmt.Errorf("%s: %w", file, err)}
    }
    rgs, errs := Parse(b)
    for i := range errs {
        errs[i] = errors.Wrap(errs[i], file)
        errs[i] = fmt.Errorf("%s: %w", file, errs[i])
    }
    return rgs, errs
}
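The rulefmt hunks above swap github.com/pkg/errors for the standard library: errors.Wrapf(err, "context") becomes fmt.Errorf("context: %w", err), and Error/WrappedError gain Unwrap methods so errors.Is and errors.As can still reach the underlying error. A minimal, self-contained sketch of that pattern follows; the parseError type and its fields are illustrative, not part of this commit:

package main

import (
    "errors"
    "fmt"
    "io"
)

// parseError mirrors the role of rulefmt's WrappedError: it decorates an
// underlying error with position information while staying unwrappable.
type parseError struct {
    line, col int
    err       error
}

func (e *parseError) Error() string {
    return fmt.Sprintf("%d:%d: %v", e.line, e.col, e.err)
}

// Unwrap lets errors.Is and errors.As walk down to the wrapped error.
func (e *parseError) Unwrap() error { return e.err }

func main() {
    // errors.Wrapf(err, "reading rules") becomes:
    wrapped := fmt.Errorf("reading rules: %w", io.EOF)
    fmt.Println(errors.Is(wrapped, io.EOF)) // true

    pe := &parseError{line: 3, col: 7, err: io.ErrUnexpectedEOF}
    fmt.Println(errors.Is(pe, io.ErrUnexpectedEOF)) // true, via Unwrap
}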
@ -15,6 +15,7 @@ package rulefmt

import (
    "errors"
    "io"
    "path/filepath"
    "testing"

@ -182,8 +183,12 @@ groups:
`
    _, errs := Parse([]byte(group))
    require.Len(t, errs, 2, "Expected two errors")
    err0 := errs[0].(*Error).Err.node
    err1 := errs[1].(*Error).Err.node
    var err00 *Error
    require.True(t, errors.As(errs[0], &err00))
    err0 := err00.Err.node
    var err01 *Error
    require.True(t, errors.As(errs[1], &err01))
    err1 := err01.Err.node
    require.NotEqual(t, err0, err1, "Error nodes should not be the same")
}

@ -299,3 +304,27 @@ func TestWrappedError(t *testing.T) {
        })
    }
}

func TestErrorUnwrap(t *testing.T) {
    err1 := errors.New("test error")

    tests := []struct {
        wrappedError   *Error
        unwrappedError error
    }{
        {
            wrappedError:   &Error{Err: WrappedError{err: err1}},
            unwrappedError: err1,
        },
        {
            wrappedError:   &Error{Err: WrappedError{err: io.ErrClosedPipe}},
            unwrappedError: io.ErrClosedPipe,
        },
    }

    for _, tt := range tests {
        t.Run(tt.wrappedError.Error(), func(t *testing.T) {
            require.ErrorIs(t, tt.wrappedError, tt.unwrappedError)
        })
    }
}
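The test above also moves from a direct type assertion (errs[0].(*Error)) to errors.As, which keeps working even if the *Error is later wrapped. A small standalone sketch of the difference, using a hypothetical codeError type:

package main

import (
    "errors"
    "fmt"
)

// codeError is a hypothetical error type used only for this illustration.
type codeError struct{ code int }

func (e *codeError) Error() string { return fmt.Sprintf("code %d", e.code) }

func main() {
    var err error = fmt.Errorf("wrapped: %w", &codeError{code: 42})

    // A direct type assertion sees only the outer wrapper and fails.
    if _, ok := err.(*codeError); !ok {
        fmt.Println("type assertion: not a *codeError")
    }

    // errors.As unwraps the chain until it finds a match.
    var ce *codeError
    if errors.As(err, &ce) {
        fmt.Println("errors.As found:", ce.code) // errors.As found: 42
    }
}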
@ -58,8 +58,6 @@ yystate0:
        goto yystart61
    }

    goto yystate0 // silence unused label error
    goto yystate1 // silence unused label error
yystate1:
    c = l.next()
yystart1:

@ -94,7 +92,6 @@ yystate4:
        goto yystate4
    }

    goto yystate5 // silence unused label error
yystate5:
    c = l.next()
yystart5:

@ -262,7 +259,6 @@ yystate24:
    c = l.next()
    goto yyrule4

    goto yystate25 // silence unused label error
yystate25:
    c = l.next()
yystart25:

@ -282,7 +278,6 @@ yystate26:
        goto yystate26
    }

    goto yystate27 // silence unused label error
yystate27:
    c = l.next()
yystart27:

@ -308,7 +303,6 @@ yystate29:
    c = l.next()
    goto yyrule7

    goto yystate30 // silence unused label error
yystate30:
    c = l.next()
yystart30:

@ -346,7 +340,6 @@ yystate34:
    c = l.next()
    goto yyrule11

    goto yystate35 // silence unused label error
yystate35:
    c = l.next()
yystart35:

@ -383,7 +376,6 @@ yystate38:
        goto yystate36
    }

    goto yystate39 // silence unused label error
yystate39:
    c = l.next()
yystart39:

@ -418,7 +410,6 @@ yystate42:
    c = l.next()
    goto yyrule9

    goto yystate43 // silence unused label error
yystate43:
    c = l.next()
yystart43:

@ -479,7 +470,6 @@ yystate49:
    c = l.next()
    goto yyrule18

    goto yystate50 // silence unused label error
yystate50:
    c = l.next()
yystart50:

@ -517,7 +507,6 @@ yystate54:
    c = l.next()
    goto yyrule20

    goto yystate55 // silence unused label error
yystate55:
    c = l.next()
yystart55:

@ -574,7 +563,6 @@ yystate60:
        goto yystate58
    }

    goto yystate61 // silence unused label error
yystate61:
    c = l.next()
yystart61:

@ -747,16 +735,58 @@ yyrule25: // {S}[^ \n]+
        return tTimestamp
    }
yyrule26: // \n
    {
        if true { // avoid go vet determining the below panic will not be reached
            l.state = sInit
            return tLinebreak
            goto yystate0
        }
        panic("unreachable")

    goto yyabort // silence unused label error

yyabort: // no lexem recognized
    //
    // silence unused label errors for build and satisfy go vet reachability analysis
    //
    {
        if false {
            goto yyabort
        }
        if false {
            goto yystate0
        }
        if false {
            goto yystate1
        }
        if false {
            goto yystate5
        }
        if false {
            goto yystate25
        }
        if false {
            goto yystate27
        }
        if false {
            goto yystate30
        }
        if false {
            goto yystate35
        }
        if false {
            goto yystate39
        }
        if false {
            goto yystate43
        }
        if false {
            goto yystate50
        }
        if false {
            goto yystate55
        }
        if false {
            goto yystate61
        }
    }

    return tInvalid
}
@ -18,6 +18,7 @@ package textparse

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "math"

@ -25,8 +26,6 @@ import (
    "strings"
    "unicode/utf8"

    "github.com/pkg/errors"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"

@ -283,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
        case "unknown":
            p.mtype = MetricTypeUnknown
        default:
            return EntryInvalid, errors.Errorf("invalid metric type %q", s)
            return EntryInvalid, fmt.Errorf("invalid metric type %q", s)
        }
    case tHelp:
        if !utf8.Valid(p.text) {

@ -300,7 +299,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
        u := yoloString(p.text)
        if len(u) > 0 {
            if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
                return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m)
                return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m)
            }
        }
        return EntryUnit, nil

@ -360,7 +359,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) {
        return EntrySeries, nil

    default:
        err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
        err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
    }
    return EntryInvalid, err
}
@ -14,6 +14,7 @@
package textparse

import (
    "errors"
    "io"
    "testing"

@ -223,7 +224,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5`

    for {
        et, err := p.Next()
        if err == io.EOF {
        if errors.Is(err, io.EOF) {
            break
        }
        require.NoError(t, err)
@ -27,6 +27,9 @@ const (
    sLValue
    sValue
    sTimestamp
    sExemplar
    sEValue
    sETimestamp
)

// Lex is called by the parser generated by "go tool yacc" to obtain each
@ -1,4 +1,4 @@
// CAUTION: Generated file - DO NOT EDIT.
// Code generated by golex. DO NOT EDIT.

// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");

@ -16,7 +16,7 @@
package textparse

import (
    "github.com/pkg/errors"
    "fmt"
)

const (

@ -47,7 +47,7 @@ yystate0:

    switch yyt := l.state; yyt {
    default:
        panic(errors.Errorf(`invalid start condition %d`, yyt))
        panic(fmt.Errorf(`invalid start condition %d`, yyt))
    case 0: // start condition: INITIAL
        goto yystart1
    case 1: // start condition: sComment

@ -66,8 +66,6 @@ yystate0:
        goto yystart36
    }

    goto yystate0 // silence unused label error
    goto yystate1 // silence unused label error
yystate1:
    c = l.next()
yystart1:

@ -130,7 +128,6 @@ yystate7:
        goto yystate7
    }

    goto yystate8 // silence unused label error
yystate8:
    c = l.next()
yystart8:

@ -235,7 +232,6 @@ yystate18:
        goto yystate18
    }

    goto yystate19 // silence unused label error
yystate19:
    c = l.next()
yystart19:

@ -257,7 +253,6 @@ yystate20:
        goto yystate20
    }

    goto yystate21 // silence unused label error
yystate21:
    c = l.next()
yystart21:

@ -290,7 +285,6 @@ yystate23:
        goto yystate22
    }

    goto yystate24 // silence unused label error
yystate24:
    c = l.next()
yystart24:

@ -330,7 +324,6 @@ yystate28:
    c = l.next()
    goto yyrule13

    goto yystate29 // silence unused label error
yystate29:
    c = l.next()
yystart29:

@ -369,7 +362,6 @@ yystate32:
        goto yystate30
    }

    goto yystate33 // silence unused label error
yystate33:
    c = l.next()
yystart33:

@ -397,7 +389,6 @@ yystate35:
    c = l.next()
    goto yyrule11

    goto yystate36 // silence unused label error
yystate36:
    c = l.next()
yystart36:

@ -521,16 +512,50 @@ yyrule18: // {D}+
        return tTimestamp
    }
yyrule19: // \n
    {
        if true { // avoid go vet determining the below panic will not be reached
            l.state = sInit
            return tLinebreak
            goto yystate0
        }
        panic("unreachable")

    goto yyabort // silence unused label error

yyabort: // no lexem recognized
    //
    // silence unused label errors for build and satisfy go vet reachability analysis
    //
    {
        if false {
            goto yyabort
        }
        if false {
            goto yystate0
        }
        if false {
            goto yystate1
        }
        if false {
            goto yystate8
        }
        if false {
            goto yystate19
        }
        if false {
            goto yystate21
        }
        if false {
            goto yystate24
        }
        if false {
            goto yystate29
        }
        if false {
            goto yystate33
        }
        if false {
            goto yystate36
        }
    }

// Workaround to gobble up comments that started with a HELP or TYPE
// prefix. We just consume all characters until we reach a newline.
// This saves us from adding disproportionate complexity to the parser.
@ -17,6 +17,7 @@
package textparse

import (
    "errors"
    "fmt"
    "io"
    "math"

@ -26,8 +27,6 @@ import (
    "unicode/utf8"
    "unsafe"

    "github.com/pkg/errors"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"

@ -259,7 +258,7 @@ func (p *PromParser) nextToken() token {
}

func parseError(exp string, got token) error {
    return errors.Errorf("%s, got %q", exp, got)
    return fmt.Errorf("%s, got %q", exp, got)
}

// Next advances the parser to the next sample. It returns false if no

@ -308,11 +307,11 @@ func (p *PromParser) Next() (Entry, error) {
        case "untyped":
            p.mtype = MetricTypeUnknown
        default:
            return EntryInvalid, errors.Errorf("invalid metric type %q", s)
            return EntryInvalid, fmt.Errorf("invalid metric type %q", s)
        }
    case tHelp:
        if !utf8.Valid(p.text) {
            return EntryInvalid, errors.Errorf("help text is not a valid utf8 string")
            return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string")
        }
    }
    if t := p.nextToken(); t != tLinebreak {

@ -371,7 +370,7 @@ func (p *PromParser) Next() (Entry, error) {
        return EntrySeries, nil

    default:
        err = errors.Errorf("%q is not a valid start token", t)
        err = fmt.Errorf("%q is not a valid start token", t)
    }
    return EntryInvalid, err
}

@ -395,7 +394,7 @@ func (p *PromParser) parseLVals() error {
            return parseError("expected label value", t)
        }
        if !utf8.Valid(p.l.buf()) {
            return errors.Errorf("invalid UTF-8 label value")
            return fmt.Errorf("invalid UTF-8 label value")
        }

        // The promlexer ensures the value string is quoted. Strip first
@ -16,6 +16,7 @@ package textparse
import (
    "bytes"
    "compress/gzip"
    "errors"
    "io"
    "os"
    "testing"

@ -176,7 +177,7 @@ testmetric{label="\"bar\""} 1`

    for {
        et, err := p.Next()
        if err == io.EOF {
        if errors.Is(err, io.EOF) {
            break
        }
        require.NoError(t, err)

@ -378,7 +379,7 @@ func BenchmarkParse(b *testing.B) {
                t, err := p.Next()
                switch t {
                case EntryInvalid:
                    if err == io.EOF {
                    if errors.Is(err, io.EOF) {
                        break Outer
                    }
                    b.Fatal(err)

@ -406,7 +407,7 @@ func BenchmarkParse(b *testing.B) {
                t, err := p.Next()
                switch t {
                case EntryInvalid:
                    if err == io.EOF {
                    if errors.Is(err, io.EOF) {
                        break Outer
                    }
                    b.Fatal(err)

@ -439,7 +440,7 @@ func BenchmarkParse(b *testing.B) {
                t, err := p.Next()
                switch t {
                case EntryInvalid:
                    if err == io.EOF {
                    if errors.Is(err, io.EOF) {
                        break Outer
                    }
                    b.Fatal(err)
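These parser tests switch EOF checks from err == io.EOF to errors.Is(err, io.EOF). The difference only shows up once a sentinel error gets wrapped; a short sketch, illustrative and not taken from the diff:

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    // Plain equality fails on a wrapped sentinel...
    err := fmt.Errorf("parsing input: %w", io.EOF)
    fmt.Println(err == io.EOF) // false

    // ...while errors.Is walks the wrap chain.
    fmt.Println(errors.Is(err, io.EOF)) // true
}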
@ -30,7 +30,6 @@ import (
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/go-openapi/strfmt"
    "github.com/pkg/errors"
    "github.com/prometheus/alertmanager/api/v2/models"
    "github.com/prometheus/client_golang/prometheus"
    config_util "github.com/prometheus/common/config"

@ -303,12 +302,22 @@ func (n *Manager) nextBatch() []*Alert {
// Run dispatches notifications continuously.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
    for {
        // The select is split in two parts, such that we first try to read
        // new alertmanager targets if they are available, before sending new
        // alerts.
        select {
        case <-n.ctx.Done():
            return
        case ts := <-tsets:
            n.reload(ts)
        case <-n.more:
        default:
            select {
            case <-n.ctx.Done():
                return
            case ts := <-tsets:
                n.reload(ts)
            case <-n.more:
            }
        }
        alerts := n.nextBatch()

@ -588,7 +597,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b

    // Any HTTP status 2xx is OK.
    if resp.StatusCode/100 != 2 {
        return errors.Errorf("bad response status %s", resp.Status)
        return fmt.Errorf("bad response status %s", resp.Status)
    }

    return nil

@ -742,7 +751,7 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig
    case "https":
        addr = addr + ":443"
    default:
        return nil, nil, errors.Errorf("invalid scheme: %q", cfg.Scheme)
        return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme)
    }
    lb.Set(model.AddressLabel, addr)
}
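The reworked Manager.Run above nests one select inside another so that a pending target update is drained before the next alert batch is sent, while still blocking when nothing is ready. A standalone sketch of that two-stage priority pattern, with illustrative channel names:

package main

import (
    "context"
    "fmt"
    "time"
)

// run mirrors the shape of Manager.Run: the outer select handles any
// already-pending update without blocking (thanks to default), and the
// inner select blocks until an update, an alert signal, or cancellation.
func run(ctx context.Context, updates <-chan string, more <-chan struct{}) {
    for {
        select {
        case <-ctx.Done():
            return
        case u := <-updates:
            fmt.Println("reload targets:", u)
        case <-more:
        default:
            select {
            case <-ctx.Done():
                return
            case u := <-updates:
                fmt.Println("reload targets:", u)
            case <-more:
            }
        }
        fmt.Println("send next batch of alerts")
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()

    updates := make(chan string, 1)
    more := make(chan struct{}, 1)
    updates <- "config-0" // seed one pending target update
    more <- struct{}{}    // and one pending alert signal
    run(ctx, updates, more)
}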
@ -22,10 +22,10 @@ import (
    "net/http"
    "net/http/httptest"
    "net/url"
    "strconv"
    "testing"
    "time"

    "github.com/pkg/errors"
    "github.com/prometheus/alertmanager/api/v2/models"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"

@ -88,11 +88,11 @@ func TestHandlerNextBatch(t *testing.T) {

func alertsEqual(a, b []*Alert) error {
    if len(a) != len(b) {
        return errors.Errorf("length mismatch: %v != %v", a, b)
        return fmt.Errorf("length mismatch: %v != %v", a, b)
    }
    for i, alert := range a {
        if !labels.Equal(alert.Labels, b[i].Labels) {
            return errors.Errorf("label mismatch at index %d: %s != %s", i, alert.Labels, b[i].Labels)
            return fmt.Errorf("label mismatch at index %d: %s != %s", i, alert.Labels, b[i].Labels)
        }
    }
    return nil

@ -121,14 +121,14 @@ func TestHandlerSendAll(t *testing.T) {
        }()
        user, pass, _ := r.BasicAuth()
        if user != u || pass != p {
            err = errors.Errorf("unexpected user/password: %s/%s != %s/%s", user, pass, u, p)
            err = fmt.Errorf("unexpected user/password: %s/%s != %s/%s", user, pass, u, p)
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        b, err := io.ReadAll(r.Body)
        if err != nil {
            err = errors.Errorf("error reading body: %v", err)
            err = fmt.Errorf("error reading body: %w", err)
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

@ -572,3 +572,118 @@ func makeInputTargetGroup() *targetgroup.Group {
func TestLabelsToOpenAPILabelSet(t *testing.T) {
    require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.Labels{{Name: "aaa", Value: "111"}, {Name: "bbb", Value: "222"}}))
}

// TestHangingNotifier validates that target updates happen even when there are
// queued alerts.
func TestHangingNotifier(t *testing.T) {
    // Note: When targets are not updated in time, this test is flaky because go
    // selects are not deterministic. Therefore we run 10 subtests to increase the
    // chance of running into the issue.
    for i := 0; i < 10; i++ {
        t.Run(strconv.Itoa(i), func(t *testing.T) {
            var (
                done    = make(chan struct{})
                changed = make(chan struct{})
                syncCh  = make(chan map[string][]*targetgroup.Group)
            )

            defer func() {
                close(done)
            }()

            var calledOnce bool
            // Setting up a bad server. This server hangs for 2 seconds.
            badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                if calledOnce {
                    t.Fatal("hanging server called multiple times")
                }
                calledOnce = true
                select {
                case <-done:
                case <-time.After(2 * time.Second):
                }
            }))
            badURL, err := url.Parse(badServer.URL)
            require.NoError(t, err)
            badAddress := badURL.Host // Used for __address__ label in targets.

            // Setting up a good server. This server returns fast, signaling requests
            // by closing the changed channel.
            goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                close(changed)
            }))
            goodURL, err := url.Parse(goodServer.URL)
            require.NoError(t, err)
            goodAddress := goodURL.Host // Used for __address__ label in targets.

            h := NewManager(
                &Options{
                    QueueCapacity: 20 * maxBatchSize,
                },
                nil,
            )

            h.alertmanagers = make(map[string]*alertmanagerSet)

            am1Cfg := config.DefaultAlertmanagerConfig
            am1Cfg.Timeout = model.Duration(200 * time.Millisecond)

            h.alertmanagers["config-0"] = &alertmanagerSet{
                ams:     []alertmanager{},
                cfg:     &am1Cfg,
                metrics: h.metrics,
            }
            go h.Run(syncCh)
            defer h.Stop()

            var alerts []*Alert
            for i := range make([]struct{}, 20*maxBatchSize) {
                alerts = append(alerts, &Alert{
                    Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
                })
            }

            // Injecting the hanging server URL.
            syncCh <- map[string][]*targetgroup.Group{
                "config-0": {
                    {
                        Targets: []model.LabelSet{
                            {
                                model.AddressLabel: model.LabelValue(badAddress),
                            },
                        },
                    },
                },
            }

            // Queueing alerts.
            h.Send(alerts...)

            // Updating with a working alertmanager target.
            go func() {
                select {
                case syncCh <- map[string][]*targetgroup.Group{
                    "config-0": {
                        {
                            Targets: []model.LabelSet{
                                {
                                    model.AddressLabel: model.LabelValue(goodAddress),
                                },
                            },
                        },
                    },
                }:
                case <-done:
                }
            }()

            select {
            case <-time.After(1 * time.Second):
                t.Fatalf("Timeout after 1 second, targets not synced in time.")
            case <-changed:
                // The good server has been hit in less than 3 seconds, therefore
                // targets have been updated before a second call could be made to the
                // bad server.
            }
        })
    }
}
@ -11,6 +11,7 @@
- github.com/prometheus/prometheus/discovery/linode
- github.com/prometheus/prometheus/discovery/marathon
- github.com/prometheus/prometheus/discovery/moby
- github.com/prometheus/prometheus/discovery/nomad
- github.com/prometheus/prometheus/discovery/openstack
- github.com/prometheus/prometheus/discovery/puppetdb
- github.com/prometheus/prometheus/discovery/scaleway
@ -55,6 +55,9 @@ import (
    // Register moby plugin.
    _ "github.com/prometheus/prometheus/discovery/moby"

    // Register nomad plugin.
    _ "github.com/prometheus/prometheus/discovery/nomad"

    // Register openstack plugin.
    _ "github.com/prometheus/prometheus/discovery/openstack"
@ -40,6 +40,11 @@ type Node interface {
    // as part of a valid query.
    String() string

    // Pretty returns the prettified representation of the node.
    // It uses the level information to determine at which level/depth the current
    // node is in the AST and uses this to apply indentation.
    Pretty(level int) string

    // PositionRange returns the position of the AST Node in the query string.
    PositionRange() PositionRange
}

@ -205,8 +210,9 @@ type VectorSelector struct {
// of an arbitrary function during handling. It is used to test the Engine.
type TestStmt func(context.Context) error

func (TestStmt) String() string { return "test statement" }
func (TestStmt) PromQLStmt()    {}
func (TestStmt) String() string      { return "test statement" }
func (TestStmt) PromQLStmt()         {}
func (t TestStmt) Pretty(int) string { return t.String() }

func (TestStmt) PositionRange() PositionRange {
    return PositionRange{
@ -48,6 +48,10 @@ func (i Item) String() string {
    return fmt.Sprintf("%q", i.Val)
}

// Pretty returns the prettified form of an item.
// This is the same as the item's stringified format.
func (i Item) Pretty(int) string { return i.String() }

// IsOperator returns true if the Item corresponds to an arithmetic or set operator.
// Returns false otherwise.
func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }
166
promql/parser/prettier.go
Normal file

@ -0,0 +1,166 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package parser

import (
    "fmt"
    "strings"
)

// Approach
// --------
// When a PromQL query is parsed, it is converted into a PromQL AST,
// which is a nested structure of nodes. Each node has a depth/level
// (distance from the root) that is passed by its parent.
//
// While prettifying, a Node considers 2 things:
// 1. Did the current Node's parent add a new line?
// 2. Does the current Node need to be prettified?
//
// The level of a Node determines if it should be indented or not.
// The answer to 1 is NO if the level passed is 0. This means the
// parent Node did not apply a new line, so the current Node must not
// apply any indentation as prefix.
// If level > 1, a new line is applied by the parent. So, the current Node
// should prefix an indentation before writing any of its content. This indentation
// will be ([level/depth of current Node] * "  ").
//
// The answer to 2 is YES if the normalized length of the current Node exceeds
// the maxCharactersPerLine limit. Hence, it applies the indentation equal to
// its depth and increments the level by 1 before passing down the child.
// If the answer is NO, the current Node returns the normalized string value of itself.

var maxCharactersPerLine = 100

func Prettify(n Node) string {
    return n.Pretty(0)
}

func (e *AggregateExpr) Pretty(level int) string {
    s := indent(level)
    if !needsSplit(e) {
        s += e.String()
        return s
    }

    s += e.getAggOpStr()
    s += "(\n"

    if e.Op.IsAggregatorWithParam() {
        s += fmt.Sprintf("%s,\n", e.Param.Pretty(level+1))
    }
    s += fmt.Sprintf("%s\n%s)", e.Expr.Pretty(level+1), indent(level))
    return s
}

func (e *BinaryExpr) Pretty(level int) string {
    s := indent(level)
    if !needsSplit(e) {
        s += e.String()
        return s
    }
    returnBool := ""
    if e.ReturnBool {
        returnBool = " bool"
    }

    matching := e.getMatchingStr()
    return fmt.Sprintf("%s\n%s%s%s%s\n%s", e.LHS.Pretty(level+1), indent(level), e.Op, returnBool, matching, e.RHS.Pretty(level+1))
}

func (e *Call) Pretty(level int) string {
    s := indent(level)
    if !needsSplit(e) {
        s += e.String()
        return s
    }
    s += fmt.Sprintf("%s(\n%s\n%s)", e.Func.Name, e.Args.Pretty(level+1), indent(level))
    return s
}

func (e *EvalStmt) Pretty(_ int) string {
    return "EVAL " + e.Expr.String()
}

func (e Expressions) Pretty(level int) string {
    // Do not prefix the indent since respective nodes will indent themselves.
    s := ""
    for i := range e {
        s += fmt.Sprintf("%s,\n", e[i].Pretty(level))
    }
    return s[:len(s)-2]
}

func (e *ParenExpr) Pretty(level int) string {
    s := indent(level)
    if !needsSplit(e) {
        s += e.String()
        return s
    }
    return fmt.Sprintf("%s(\n%s\n%s)", s, e.Expr.Pretty(level+1), indent(level))
}

func (e *StepInvariantExpr) Pretty(level int) string {
    return e.Expr.Pretty(level)
}

func (e *MatrixSelector) Pretty(level int) string {
    return getCommonPrefixIndent(level, e)
}

func (e *SubqueryExpr) Pretty(level int) string {
    if !needsSplit(e) {
        return e.String()
    }
    return fmt.Sprintf("%s%s", e.Expr.Pretty(level), e.getSubqueryTimeSuffix())
}

func (e *VectorSelector) Pretty(level int) string {
    return getCommonPrefixIndent(level, e)
}

func (e *NumberLiteral) Pretty(level int) string {
    return getCommonPrefixIndent(level, e)
}

func (e *StringLiteral) Pretty(level int) string {
    return getCommonPrefixIndent(level, e)
}

func (e *UnaryExpr) Pretty(level int) string {
    child := e.Expr.Pretty(level)
    // Remove the indent prefix from child since we attach the prefix indent before Op.
    child = strings.TrimSpace(child)
    return fmt.Sprintf("%s%s%s", indent(level), e.Op, child)
}

func getCommonPrefixIndent(level int, current Node) string {
    return fmt.Sprintf("%s%s", indent(level), current.String())
}

// needsSplit normalizes the node and then checks if the node needs any split.
// This is necessary to remove any trailing whitespaces.
func needsSplit(n Node) bool {
    if n == nil {
        return false
    }
    return len(n.String()) > maxCharactersPerLine
}

const indentString = "  "

// indent repeats indentString n times.
func indent(n int) string {
    return strings.Repeat(indentString, n)
}
16
promql/parser/prettier_rules.md
Normal file

@ -0,0 +1,16 @@
# Prettifying PromQL expressions

This file contains rules for prettifying PromQL expressions.

Note: The current version of prettier does not preserve comments.

### Keywords
`max_characters_per_line`: Maximum number of characters that will be allowed on a single line in a prettified PromQL expression.

### Rules
1. A node exceeding the `max_characters_per_line` will qualify for split unless
   1. It is a `MatrixSelector`
   2. It is a `VectorSelector`. Label sets in a `VectorSelector` will be in the same line as metric_name, separated by commas and a space
      Note: Label groupings like `by`, `without`, `on`, `ignoring` will remain on the same line as their parent node
2. Nodes that are nested within another node will be prettified only if they exceed the `max_characters_per_line`
3. Expressions like `sum(expression) without (label_matchers)` will be modified to `sum without(label_matchers) (expression)`
4. Function call args will be split to different lines if they exceed the `max_characters_per_line`
666
promql/parser/prettier_test.go
Normal file
666
promql/parser/prettier_test.go
Normal file
|
@ -0,0 +1,666 @@
|
|||
// Copyright 2022 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAggregateExprPretty(t *testing.T) {
|
||||
maxCharactersPerLine = 10
|
||||
inputs := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{
|
||||
in: `sum(foo)`,
|
||||
out: `sum(foo)`,
|
||||
},
|
||||
{
|
||||
in: `sum by() (task:errors:rate10s{job="s"})`,
|
||||
out: `sum(
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum without(job,foo) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum without(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum(task:errors:rate10s{job="s"}) without(job,foo)`,
|
||||
out: `sum without(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum (task:errors:rate10s{job="s"}) by(job,foo)`,
|
||||
out: `sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `topk(10, ask:errors:rate10s{job="s"})`,
|
||||
out: `topk(
|
||||
10,
|
||||
ask:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) (sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"})))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo)
|
||||
(sum by(job,foo) (task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo)
|
||||
(sum(task:errors:rate10s{job="s"}) without(job,foo))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum without(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) # Comment 1.
|
||||
(sum by(job,foo) ( # Comment 2.
|
||||
task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
},
|
||||
}
|
||||
for _, test := range inputs {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryExprPretty(t *testing.T) {
|
||||
maxCharactersPerLine = 10
|
||||
inputs := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{
|
||||
in: `a+b`,
|
||||
out: `a + b`,
|
||||
},
|
||||
{
|
||||
in: `a == bool 1`,
|
||||
out: ` a
|
||||
== bool
|
||||
1`,
|
||||
},
|
||||
{
|
||||
in: `a + ignoring(job) b`,
|
||||
out: ` a
|
||||
+ ignoring(job)
|
||||
b`,
|
||||
},
|
||||
{
|
||||
in: `foo_1 + foo_2`,
|
||||
out: ` foo_1
|
||||
+
|
||||
foo_2`,
|
||||
},
|
||||
{
|
||||
in: `foo_1 + foo_2 + foo_3`,
|
||||
out: ` foo_1
|
||||
+
|
||||
foo_2
|
||||
+
|
||||
foo_3`,
|
||||
},
|
||||
{
|
||||
in: `foo + baar + foo_3`,
|
||||
out: ` foo + baar
|
||||
+
|
||||
foo_3`,
|
||||
},
|
||||
{
|
||||
in: `foo_1 + foo_2 + foo_3 + foo_4`,
|
||||
out: ` foo_1
|
||||
+
|
||||
foo_2
|
||||
+
|
||||
foo_3
|
||||
+
|
||||
foo_4`,
|
||||
},
|
||||
{
|
||||
in: `foo_1 + ignoring(foo) foo_2 + ignoring(job) group_left foo_3 + on(instance) group_right foo_4`,
|
||||
out: ` foo_1
|
||||
+ ignoring(foo)
|
||||
foo_2
|
||||
+ ignoring(job) group_left()
|
||||
foo_3
|
||||
+ on(instance) group_right()
|
||||
foo_4`,
|
||||
},
|
||||
}
|
||||
for _, test := range inputs {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallExprPretty(t *testing.T) {
|
||||
maxCharactersPerLine = 10
|
||||
inputs := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{
|
||||
in: `rate(foo[1m])`,
|
||||
out: `rate(
|
||||
foo[1m]
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum_over_time(foo[1m])`,
|
||||
out: `sum_over_time(
|
||||
foo[1m]
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `rate(long_vector_selector[10m:1m] @ start() offset 1m)`,
|
||||
out: `rate(
|
||||
long_vector_selector[10m:1m] @ start() offset 1m
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `histogram_quantile(0.9, rate(foo[1m]))`,
|
||||
out: `histogram_quantile(
|
||||
0.9,
|
||||
rate(
|
||||
foo[1m]
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `max_over_time(rate(demo_api_request_duration_seconds_count[1m])[1m:] @ start() offset 1m)`,
|
||||
out: `max_over_time(
|
||||
rate(
|
||||
demo_api_request_duration_seconds_count[1m]
|
||||
)[1m:] @ start() offset 1m
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*")`,
|
||||
out: `label_replace(
|
||||
up{job="api-server",service="a:c"},
|
||||
"foo",
|
||||
"$1",
|
||||
"service",
|
||||
"(.*):.*"
|
||||
)`,
|
||||
},
|
||||
{
|
||||
			in:  `label_replace(label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*"), "foo", "$1", "service", "(.*):.*")`,
			out: `label_replace(
  label_replace(
    up{job="api-server",service="a:c"},
    "foo",
    "$1",
    "service",
    "(.*):.*"
  ),
  "foo",
  "$1",
  "service",
  "(.*):.*"
)`,
		},
	}
	for _, test := range inputs {
		expr, err := ParseExpr(test.in)
		require.NoError(t, err)

		fmt.Println("=>", expr.String())
		require.Equal(t, test.out, Prettify(expr))
	}
}

func TestParenExprPretty(t *testing.T) {
	maxCharactersPerLine = 10
	inputs := []struct {
		in, out string
	}{
		{
			in:  `(foo)`,
			out: `(foo)`,
		},
		{
			in: `(_foo_long_)`,
			out: `(
  _foo_long_
)`,
		},
		{
			in: `((foo_long))`,
			out: `(
  (foo_long)
)`,
		},
		{
			in: `((_foo_long_))`,
			out: `(
  (
    _foo_long_
  )
)`,
		},
		{
			in: `(((foo_long)))`,
			out: `(
  (
    (foo_long)
  )
)`,
		},
	}
	for _, test := range inputs {
		expr, err := ParseExpr(test.in)
		require.NoError(t, err)

		require.Equal(t, test.out, Prettify(expr))
	}
}

func TestStepInvariantExpr(t *testing.T) {
	maxCharactersPerLine = 10
	inputs := []struct {
		in, out string
	}{
		{
			in:  `a @ 1`,
			out: `a @ 1.000`,
		},
		{
			in:  `a @ start()`,
			out: `a @ start()`,
		},
		{
			in:  `vector_selector @ start()`,
			out: `vector_selector @ start()`,
		},
	}
	for _, test := range inputs {
		expr, err := ParseExpr(test.in)
		require.NoError(t, err)

		require.Equal(t, test.out, Prettify(expr))
	}
}

func TestExprPretty(t *testing.T) {
	maxCharactersPerLine = 10
	inputs := []struct {
		in, out string
	}{
		{
			in:  `(1 + 2)`,
			out: `(1 + 2)`,
		},
		{
			in: `(foo + bar)`,
			out: `(
  foo + bar
)`,
		},
		{
			in: `(foo_long + bar_long)`,
			out: `(
  foo_long
  +
  bar_long
)`,
		},
		{
			in: `(foo_long + bar_long + bar_2_long)`,
			out: `(
  foo_long
  +
  bar_long
  +
  bar_2_long
)`,
		},
		{
			in: `((foo_long + bar_long) + bar_2_long)`,
			out: `(
  (
    foo_long
    +
    bar_long
  )
  +
  bar_2_long
)`,
		},
		{
			in: `(1111 + 2222)`,
			out: `(
  1111
  +
  2222
)`,
		},
		{
			in: `(sum_over_time(foo[1m]))`,
			out: `(
  sum_over_time(
    foo[1m]
  )
)`,
		},
		{
			in: `histogram_quantile(0.9, rate(foo[1m] @ start()))`,
			out: `histogram_quantile(
  0.9,
  rate(
    foo[1m] @ start()
  )
)`,
		},
		{
			in: `(label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*"))`,
			out: `(
  label_replace(
    up{job="api-server",service="a:c"},
    "foo",
    "$1",
    "service",
    "(.*):.*"
  )
)`,
		},
		{
			in: `(label_replace(label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*"), "foo", "$1", "service", "(.*):.*"))`,
			out: `(
  label_replace(
    label_replace(
      up{job="api-server",service="a:c"},
      "foo",
      "$1",
      "service",
      "(.*):.*"
    ),
    "foo",
    "$1",
    "service",
    "(.*):.*"
  )
)`,
		},
		{
			in: `(label_replace(label_replace((up{job="api-server",service="a:c"}), "foo", "$1", "service", "(.*):.*"), "foo", "$1", "service", "(.*):.*"))`,
			out: `(
  label_replace(
    label_replace(
      (
        up{job="api-server",service="a:c"}
      ),
      "foo",
      "$1",
      "service",
      "(.*):.*"
    ),
    "foo",
    "$1",
    "service",
    "(.*):.*"
  )
)`,
		},
		// Following queries have been taken from https://monitoring.mixins.dev/
		{
			in: `(node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 40 and predict_linear(node_filesystem_avail_bytes{job="node",fstype!=""}[6h], 24*60*60) < 0 and node_filesystem_readonly{job="node",fstype!=""} == 0)`,
			out: `(
  node_filesystem_avail_bytes{fstype!="",job="node"}
  /
  node_filesystem_size_bytes{fstype!="",job="node"}
  *
  100
  <
  40
  and
  predict_linear(
    node_filesystem_avail_bytes{fstype!="",job="node"}[6h],
    24 * 60
    *
    60
  )
  <
  0
  and
  node_filesystem_readonly{fstype!="",job="node"}
  ==
  0
)`,
		},
		{
			in: `(node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 20 and predict_linear(node_filesystem_avail_bytes{job="node",fstype!=""}[6h], 4*60*60) < 0 and node_filesystem_readonly{job="node",fstype!=""} == 0)`,
			out: `(
  node_filesystem_avail_bytes{fstype!="",job="node"}
  /
  node_filesystem_size_bytes{fstype!="",job="node"}
  *
  100
  <
  20
  and
  predict_linear(
    node_filesystem_avail_bytes{fstype!="",job="node"}[6h],
    4 * 60
    *
    60
  )
  <
  0
  and
  node_filesystem_readonly{fstype!="",job="node"}
  ==
  0
)`,
		},
		{
			in: `(node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)`,
			out: `  (
    node_timex_offset_seconds
    >
    0.05
    and
    deriv(
      node_timex_offset_seconds[5m]
    )
    >=
    0
  )
or
  (
    node_timex_offset_seconds
    <
    -0.05
    and
    deriv(
      node_timex_offset_seconds[5m]
    )
    <=
    0
  )`,
		},
		{
			in: `1 - ((node_memory_MemAvailable_bytes{job="node"} or (node_memory_Buffers_bytes{job="node"} + node_memory_Cached_bytes{job="node"} + node_memory_MemFree_bytes{job="node"} + node_memory_Slab_bytes{job="node"}) ) / node_memory_MemTotal_bytes{job="node"})`,
			out: `  1
-
  (
    (
      node_memory_MemAvailable_bytes{job="node"}
      or
      (
        node_memory_Buffers_bytes{job="node"}
        +
        node_memory_Cached_bytes{job="node"}
        +
        node_memory_MemFree_bytes{job="node"}
        +
        node_memory_Slab_bytes{job="node"}
      )
    )
    /
    node_memory_MemTotal_bytes{job="node"}
  )`,
		},
		{
			in: `min by (job, integration) (rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~".*"}[5m]) / rate(alertmanager_notifications_total{job="alertmanager", integration="~.*"}[5m])) > 0.01`,
			out: `  min by(job, integration) (
    rate(
      alertmanager_notifications_failed_total{integration=~".*",job="alertmanager"}[5m]
    )
    /
    rate(
      alertmanager_notifications_total{integration="~.*",job="alertmanager"}[5m]
    )
  )
>
  0.01`,
		},
		{
			in: `(count by (job) (changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4) / count by (job) (up{job="alertmanager"})) >= 0.5`,
			out: `  (
    count by(job) (
      changes(
        process_start_time_seconds{job="alertmanager"}[10m]
      )
      >
      4
    )
    /
    count by(job) (
      up{job="alertmanager"}
    )
  )
>=
  0.5`,
		},
	}
	for _, test := range inputs {
		expr, err := ParseExpr(test.in)
		require.NoError(t, err)
		require.Equal(t, test.out, Prettify(expr))
	}
}

func TestUnaryPretty(t *testing.T) {
	maxCharactersPerLine = 10
	inputs := []struct {
		in, out string
	}{
		{
			in:  `-1`,
			out: `-1`,
		},
		{
			in:  `-vector_selector`,
			out: `-vector_selector`,
		},
		{
			in: `(-vector_selector)`,
			out: `(
  -vector_selector
)`,
		},
		{
			in: `-histogram_quantile(0.9,rate(foo[1m]))`,
			out: `-histogram_quantile(
  0.9,
  rate(
    foo[1m]
  )
)`,
		},
		{
			in: `-histogram_quantile(0.99, sum by (le) (rate(foo[1m])))`,
			out: `-histogram_quantile(
  0.99,
  sum by(le) (
    rate(
      foo[1m]
    )
  )
)`,
		},
		{
			in: `-histogram_quantile(0.9, -rate(foo[1m] @ start()))`,
			out: `-histogram_quantile(
  0.9,
  -rate(
    foo[1m] @ start()
  )
)`,
		},
		{
			in: `(-histogram_quantile(0.9, -rate(foo[1m] @ start())))`,
			out: `(
  -histogram_quantile(
    0.9,
    -rate(
      foo[1m] @ start()
    )
  )
)`,
		},
	}
	for _, test := range inputs {
		expr, err := ParseExpr(test.in)
		require.NoError(t, err)
		require.Equal(t, test.out, Prettify(expr))
	}
}
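For orientation, this is how the prettifier exercised by these tests is driven from outside the package — a minimal sketch assuming the exported promql/parser API (ParseExpr and Prettify, the same entry points the tests use):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Parse a query, then print its multi-line, indented form.
	expr, err := parser.ParseExpr(`histogram_quantile(0.9, rate(foo[1m]))`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parser.Prettify(expr))
}

Outside of tests the line-length threshold (maxCharactersPerLine) is internal to the package, so callers only control the input expression.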
@@ -62,16 +62,7 @@ func (es Expressions) String() (s string) {
 }
 
 func (node *AggregateExpr) String() string {
-	aggrString := node.Op.String()
-
-	if node.Without {
-		aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", "))
-	} else {
-		if len(node.Grouping) > 0 {
-			aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", "))
-		}
-	}
-
+	aggrString := node.getAggOpStr()
 	aggrString += "("
 	if node.Op.IsAggregatorWithParam() {
 		aggrString += fmt.Sprintf("%s, ", node.Param)
@@ -81,31 +72,48 @@ func (node *AggregateExpr) String() string {
 	return aggrString
 }
 
+func (node *AggregateExpr) getAggOpStr() string {
+	aggrString := node.Op.String()
+
+	switch {
+	case node.Without:
+		aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", "))
+	case len(node.Grouping) > 0:
+		aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", "))
+	}
+
+	return aggrString
+}
+
 func (node *BinaryExpr) String() string {
 	returnBool := ""
 	if node.ReturnBool {
 		returnBool = " bool"
 	}
 
+	matching := node.getMatchingStr()
+	return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS)
+}
+
+func (node *BinaryExpr) getMatchingStr() string {
 	matching := ""
 	vm := node.VectorMatching
 	if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) {
+		vmTag := "ignoring"
 		if vm.On {
-			matching = fmt.Sprintf(" on(%s)", strings.Join(vm.MatchingLabels, ", "))
-		} else {
-			matching = fmt.Sprintf(" ignoring(%s)", strings.Join(vm.MatchingLabels, ", "))
+			vmTag = "on"
 		}
+		matching = fmt.Sprintf(" %s(%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
+
 		if vm.Card == CardManyToOne || vm.Card == CardOneToMany {
-			matching += " group_"
+			vmCard := "right"
 			if vm.Card == CardManyToOne {
-				matching += "left"
-			} else {
-				matching += "right"
+				vmCard = "left"
 			}
-			matching += fmt.Sprintf("(%s)", strings.Join(vm.Include, ", "))
+			matching += fmt.Sprintf(" group_%s(%s)", vmCard, strings.Join(vm.Include, ", "))
 		}
 	}
-	return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS)
+	return matching
 }
 
 func (node *Call) String() string {
@@ -144,6 +152,11 @@ func (node *MatrixSelector) String() string {
 }
 
+func (node *SubqueryExpr) String() string {
+	return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix())
+}
+
+// getSubqueryTimeSuffix returns the '[<range>:<step>] @ <timestamp> offset <offset>' suffix of the subquery.
-func (node *SubqueryExpr) String() string {
+func (node *SubqueryExpr) getSubqueryTimeSuffix() string {
 	step := ""
 	if node.Step != 0 {
 		step = model.Duration(node.Step).String()
@@ -162,7 +175,7 @@ func (node *SubqueryExpr) String() string {
 	} else if node.StartOrEnd == END {
 		at = " @ end()"
 	}
-	return fmt.Sprintf("%s[%s:%s]%s%s", node.Expr.String(), model.Duration(node.Range), step, at, offset)
+	return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset)
 }
 
 func (node *NumberLiteral) String() string {
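The extraction of getAggOpStr and getMatchingStr above is a pure refactor: String() output is unchanged. A small sketch of the round-trip behavior it preserves (the expression is chosen for illustration only):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// The matching modifiers are rendered by getMatchingStr via String().
	expr, err := parser.ParseExpr(`foo / on(instance) group_left(job) bar`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(expr.String()) // foo / on(instance) group_left(job) bar
}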
@@ -23,7 +23,6 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	yaml "gopkg.in/yaml.v2"
 
@@ -69,7 +68,7 @@ func (s AlertState) String() string {
 	case StateFiring:
 		return "firing"
 	}
-	panic(errors.Errorf("unknown alert state: %d", s))
+	panic(fmt.Errorf("unknown alert state: %d", s))
 }
 
 // Alert is the user-level representation of a single instance of an alerting rule.
@@ -450,7 +449,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
 
 	if limit > 0 && numActivePending > limit {
 		r.active = map[uint64]*Alert{}
-		return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
+		return nil, fmt.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
 	}
 
 	return vec, nil
@@ -15,11 +15,11 @@ package rules
 
 import (
 	"context"
+	"errors"
 	"testing"
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
@@ -15,6 +15,8 @@ package rules
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"math"
 	"net/url"
 	"sort"
@@ -23,7 +25,6 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"go.opentelemetry.io/otel"
@@ -631,7 +632,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 
 			// Canceled queries are intentional termination of queries. This normally
 			// happens on shutdown and thus we skip logging of any errors here.
-			if _, ok := err.(promql.ErrQueryCanceled); !ok {
+			var eqc promql.ErrQueryCanceled
+			if !errors.As(err, &eqc) {
 				level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err)
 			}
 			return
@@ -668,12 +670,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 					rule.SetHealth(HealthBad)
 					rule.SetLastError(err)
 					sp.SetStatus(codes.Error, err.Error())
 
-					switch errors.Cause(err) {
-					case storage.ErrOutOfOrderSample:
+					unwrappedErr := errors.Unwrap(err)
+					switch {
+					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
 						numOutOfOrder++
 						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
-					case storage.ErrDuplicateSampleForTimestamp:
+					case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 						numDuplicates++
 						level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
 					default:
@@ -695,9 +697,10 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 				if _, ok := seriesReturned[metric]; !ok {
 					// Series no longer exposed, mark it stale.
 					_, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-					switch errors.Cause(err) {
-					case nil:
-					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+					unwrappedErr := errors.Unwrap(err)
+					switch {
+					case unwrappedErr == nil:
+					case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 						// Do not count these in logging, as this is expected if series
 						// is exposed from a different rule.
 					default:
@@ -721,9 +724,10 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
 	for _, s := range g.staleSeries {
 		// Rule that produced series no longer configured, mark it stale.
 		_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-		switch errors.Cause(err) {
-		case nil:
-		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+		unwrappedErr := errors.Unwrap(err)
+		switch {
+		case unwrappedErr == nil:
+		case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
 			// Do not count these in logging, as this is expected if series
 			// is exposed from a different rule.
 		default:
@@ -1075,7 +1079,7 @@ func (m *Manager) LoadGroups(
 		for _, r := range rg.Rules {
 			expr, err := m.opts.GroupLoader.Parse(r.Expr.Value)
 			if err != nil {
-				return nil, []error{errors.Wrap(err, fn)}
+				return nil, []error{fmt.Errorf("%s: %w", fn, err)}
 			}
 
 			if r.Alert.Value != "" {
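These hunks replace pkg/errors' Cause-based matching with the standard library. A minimal sketch of the difference, assuming a single level of %w wrapping as at these call sites:

package main

import (
	"errors"
	"fmt"
)

var errOutOfOrder = errors.New("out of order sample")

func main() {
	err := fmt.Errorf("append failed: %w", errOutOfOrder)

	// pkg/errors style: errors.Cause(err) == errOutOfOrder, an exact match only.
	// Standard library style: unwrap once, then compare with errors.Is,
	// which also tolerates further wrapping layers.
	unwrappedErr := errors.Unwrap(err)
	fmt.Println(errors.Is(unwrappedErr, errOutOfOrder)) // true
	fmt.Println(errors.Is(err, errOutOfOrder))          // true as well
}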
@@ -15,7 +15,9 @@ package scrape
 
 import (
 	"context"
+	"fmt"
 	"math/rand"
+	"strings"
 
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
@@ -132,3 +134,17 @@ func (a *collectResultAppender) Rollback() error {
 	}
 	return a.next.Rollback()
 }
+
+func (a *collectResultAppender) String() string {
+	var sb strings.Builder
+	for _, s := range a.result {
+		sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.v, s.t))
+	}
+	for _, s := range a.pendingResult {
+		sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.v, s.t))
+	}
+	for _, s := range a.rolledbackResult {
+		sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.v, s.t))
+	}
+	return sb.String()
+}

@@ -405,8 +405,10 @@ scrape_configs:
 		return noopLoop()
 	}
 	sp := &scrapePool{
-		appendable:    &nopAppendable{},
-		activeTargets: map[uint64]*Target{},
+		appendable: &nopAppendable{},
+		activeTargets: map[uint64]*Target{
+			1: {},
+		},
 		loops: map[uint64]loop{
 			1: noopLoop(),
 		},

@@ -427,8 +427,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 			cache = newScrapeCache()
 		}
 
+		t := sp.activeTargets[fp]
+		interval, timeout, err := t.intervalAndTimeout(interval, timeout)
 		var (
-			t       = sp.activeTargets[fp]
 			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
 			newLoop = sp.newLoop(scrapeLoopOptions{
 				target:          t,
@@ -443,6 +444,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 				timeout:         timeout,
 			})
 		)
+		if err != nil {
+			newLoop.setForcedError(err)
+		}
 		wg.Add(1)
 
 		go func(oldLoop, newLoop loop) {
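The reload path above now recomputes each target's interval and timeout so that values set via relabeling survive a config reload. A hedged sketch of what intervalAndTimeout does with the target's labels — the helper shape here is illustrative, not the exact Prometheus internals — and it matches what the test after this sketch asserts:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func intervalAndTimeout(lbls map[string]string, defInterval, defTimeout time.Duration) (time.Duration, time.Duration, error) {
	interval, timeout := defInterval, defTimeout
	if v, ok := lbls[model.ScrapeIntervalLabel]; ok {
		d, err := model.ParseDuration(v)
		if err != nil {
			return interval, timeout, fmt.Errorf("parsing interval label %q: %w", v, err)
		}
		interval = time.Duration(d)
	}
	if v, ok := lbls[model.ScrapeTimeoutLabel]; ok {
		d, err := model.ParseDuration(v)
		if err != nil {
			return interval, timeout, fmt.Errorf("parsing timeout label %q: %w", v, err)
		}
		timeout = time.Duration(d)
	}
	return interval, timeout, nil
}

func main() {
	// Relabeled 5s/3s overrides win over the pool-level 3s/2s defaults.
	i, to, err := intervalAndTimeout(map[string]string{
		model.ScrapeIntervalLabel: "5s",
		model.ScrapeTimeoutLabel:  "3s",
	}, 3*time.Second, 2*time.Second)
	fmt.Println(i, to, err) // 5s 3s <nil>
}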
@@ -326,6 +326,40 @@ func TestScrapePoolReload(t *testing.T) {
 	require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
 }
 
+func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
+	reloadCfg := &config.ScrapeConfig{
+		ScrapeInterval: model.Duration(3 * time.Second),
+		ScrapeTimeout:  model.Duration(2 * time.Second),
+	}
+	newLoop := func(opts scrapeLoopOptions) loop {
+		l := &testLoop{interval: time.Duration(opts.interval), timeout: time.Duration(opts.timeout)}
+		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
+			require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
+			require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
+		}
+		return l
+	}
+	sp := &scrapePool{
+		appendable: &nopAppendable{},
+		activeTargets: map[uint64]*Target{
+			1: {
+				labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
+			},
+		},
+		loops: map[uint64]loop{
+			1: noopLoop(),
+		},
+		newLoop: newLoop,
+		logger:  nil,
+		client:  http.DefaultClient,
+	}
+
+	err := sp.reload(reloadCfg)
+	if err != nil {
+		t.Fatalf("unable to reload configuration: %s", err)
+	}
+}
+
 func TestScrapePoolTargetLimit(t *testing.T) {
 	var wg sync.WaitGroup
 	// On starting to run, new loops created on reload check whether their preceding
@@ -1092,7 +1126,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
 
 	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
 	// each scrape successful or not.
-	require.Equal(t, 27, len(appender.result), "Appended samples not as expected")
+	require.Equal(t, 27, len(appender.result), "Appended samples not as expected:\n%s", appender)
 	require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
 	require.True(t, value.IsStaleNaN(appender.result[6].v),
 		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
@@ -1156,7 +1190,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
 
 	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
 	// each scrape successful or not.
-	require.Equal(t, 17, len(appender.result), "Appended samples not as expected")
+	require.Equal(t, 17, len(appender.result), "Appended samples not as expected:\n%s", appender)
 	require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
 	require.True(t, value.IsStaleNaN(appender.result[6].v),
 		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
@@ -1239,7 +1273,7 @@ func TestScrapeLoopCache(t *testing.T) {
 
 	// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
 	// each scrape successful or not.
-	require.Equal(t, 26, len(appender.result), "Appended samples not as expected")
+	require.Equal(t, 26, len(appender.result), "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
@@ -1609,7 +1643,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
 			v:      1,
 		},
 	}
-	require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected")
+	require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected:\n%s", appender)
 
 	now = time.Now()
 	slApp = sl.appender(context.Background())
@@ -1674,7 +1708,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
 			v:      2,
 		},
 	}
-	require.Equal(t, want, capp.result, "Appended samples not as expected")
+	require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoopAppendStaleness(t *testing.T) {
@@ -1726,7 +1760,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
 			v:      42,
 		},
 	}
-	require.Equal(t, want, app.result, "Appended samples not as expected")
+	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
@@ -1767,7 +1801,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
 			v:      1,
 		},
 	}
-	require.Equal(t, want, app.result, "Appended samples not as expected")
+	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoopAppendExemplar(t *testing.T) {
@@ -2075,7 +2109,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
 			v:      1,
 		},
 	}
-	require.Equal(t, want, app.result, "Appended samples not as expected")
+	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", appender)
 	require.Equal(t, 4, total)
 	require.Equal(t, 4, added)
 	require.Equal(t, 1, seriesAdded)
@@ -2377,7 +2411,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
 			v:      1,
 		},
 	}
-	require.Equal(t, want, capp.result, "Appended samples not as expected")
+	require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
@@ -2418,7 +2452,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
 			v:      1,
 		},
 	}
-	require.Equal(t, want, capp.result, "Appended samples not as expected")
+	require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", appender)
 }
 
 func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
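All of these assertion messages rely on the String method added to collectResultAppender earlier in this commit: require formats trailing arguments with the fmt package, so the %s verb picks up the appender's fmt.Stringer implementation and dumps its state on failure. A reduced illustration of the mechanism (types simplified):

package main

import "fmt"

type appender struct{ committed []string }

// String implements fmt.Stringer, so %s renders the appender's contents.
func (a *appender) String() string {
	out := ""
	for _, s := range a.committed {
		out += "committed: " + s + "\n"
	}
	return out
}

func main() {
	a := &appender{committed: []string{"up 1.000000 1000"}}
	// This mirrors how the updated require.Equal messages render on failure.
	fmt.Printf("Appended samples not as expected:\n%s", a)
}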
104	scripts/ui_release.sh	Executable file
@@ -0,0 +1,104 @@
#!/usr/bin/env bash

## /!\ This file must be used at the root of the prometheus project
## This script provides utility methods to help release and verify the readiness of each lib under the folder ui/

set -e

current=$(pwd)
root_ui_folder=${current}/web/ui

cd "${root_ui_folder}"

files=("../../LICENSE" "../../CHANGELOG.md")
workspaces=$(jq -r '.workspaces[]' < package.json)

function copy() {
  for file in "${files[@]}"; do
    for workspace in ${workspaces}; do
      if [ -f "${file}" ]; then
        cp "${file}" "${workspace}"/"$(basename "${file}")"
      fi
    done
  done
}

function publish() {
  dry_run="${1}"
  cmd="npm publish --access public"
  if [[ "${dry_run}" == "dry-run" ]]; then
    cmd+=" --dry-run"
  fi
  for workspace in ${workspaces}; do
    # package "app" is private so we shouldn't try to publish it.
    if [[ "${workspace}" != "react-app" ]]; then
      cd "${workspace}"
      eval "${cmd}"
      cd "${root_ui_folder}"
    fi
  done
}

function checkPackage() {
  version=${1}
  if [[ "${version}" == v* ]]; then
    version="${version:1}"
  fi
  for workspace in ${workspaces}; do
    cd "${workspace}"
    package_version=$(npm run env | grep npm_package_version | cut -d= -f2-)
    if [ "${version}" != "${package_version}" ]; then
      echo "version of ${workspace} is not the correct one"
      echo "expected one: ${version}"
      echo "current one: ${package_version}"
      echo "please use ./ui_release --bump-version ${version}"
      exit 1
    fi
    cd "${root_ui_folder}"
  done
}

function clean() {
  for file in "${files[@]}"; do
    for workspace in ${workspaces}; do
      f="${workspace}"/"$(basename "${file}")"
      if [ -f "${f}" ]; then
        rm "${f}"
      fi
    done
  done
}

function bumpVersion() {
  version="${1}"
  if [[ "${version}" == v* ]]; then
    version="${version:1}"
  fi
  # increase the version on all packages
  npm version "${version}" --workspaces
  # upgrade the @prometheus-io/* dependencies on all packages
  for workspace in ${workspaces}; do
    sed -E -i "s|(\"@prometheus-io/.+\": )\".+\"|\1\"\^${version}\"|" "${workspace}"/package.json
  done
}

if [[ "$1" == "--copy" ]]; then
  copy
fi

if [[ $1 == "--publish" ]]; then
  publish "${@:2}"
fi

if [[ $1 == "--check-package" ]]; then
  checkPackage "${@:2}"
fi

if [[ $1 == "--bump-version" ]]; then
  bumpVersion "${@:2}"
fi

if [[ $1 == "--clean" ]]; then
  clean
fi
@@ -16,12 +16,11 @@ package storage
 import (
 	"bytes"
 	"container/heap"
+	"fmt"
 	"math"
 	"sort"
 	"sync"
 
-	"github.com/pkg/errors"
-
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -160,7 +159,7 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
 func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) {
 	res, ws, err := q.lvals(q.queriers, name, matchers...)
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "LabelValues() from merge generic querier for label %s", name)
+		return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
 	}
 	return res, ws, nil
 }
@@ -228,7 +227,7 @@ func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string,
 		warnings = append(warnings, wrn...)
 	}
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "LabelNames() from merge generic querier")
+		return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err)
 	}
 	for _, name := range names {
 		labelNamesMap[name] = struct{}{}

@@ -14,13 +14,13 @@
 package storage
 
 import (
+	"errors"
 	"fmt"
 	"math"
 	"sort"
 	"sync"
 	"testing"
 
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
@@ -16,13 +16,14 @@ package remote
 import (
 	"bufio"
 	"encoding/binary"
+	"errors"
+	"fmt"
 	"hash"
 	"hash/crc32"
 	"io"
 	"net/http"
 
 	"github.com/gogo/protobuf/proto"
-	"github.com/pkg/errors"
 )
 
 // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows.
@@ -119,7 +120,7 @@ func (r *ChunkedReader) Next() ([]byte, error) {
 	}
 
 	if size > r.sizeLimit {
-		return nil, errors.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size)
+		return nil, fmt.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size)
 	}
 
 	if cap(r.data) < int(size) {

@@ -26,7 +26,6 @@ import (
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -222,7 +221,7 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
 		if scanner.Scan() {
 			line = scanner.Text()
 		}
-		err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
+		err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
 	}
 	if httpResp.StatusCode/100 == 5 {
 		return RecoverableError{err, defaultBackoff}
@@ -273,13 +272,13 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
 	}
 	data, err := proto.Marshal(req)
 	if err != nil {
-		return nil, errors.Wrapf(err, "unable to marshal read request")
+		return nil, fmt.Errorf("unable to marshal read request: %w", err)
 	}
 
 	compressed := snappy.Encode(nil, data)
 	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
 	if err != nil {
-		return nil, errors.Wrap(err, "unable to create request")
+		return nil, fmt.Errorf("unable to create request: %w", err)
 	}
 	httpReq.Header.Add("Content-Encoding", "snappy")
 	httpReq.Header.Add("Accept-Encoding", "snappy")
@@ -296,7 +295,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
 	start := time.Now()
 	httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
 	if err != nil {
-		return nil, errors.Wrap(err, "error sending request")
+		return nil, fmt.Errorf("error sending request: %w", err)
 	}
 	defer func() {
 		io.Copy(io.Discard, httpResp.Body)
@@ -307,26 +306,26 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
 
 	compressed, err = io.ReadAll(httpResp.Body)
 	if err != nil {
-		return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status))
+		return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
 	}
 
 	if httpResp.StatusCode/100 != 2 {
-		return nil, errors.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed)))
+		return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed)))
 	}
 
 	uncompressed, err := snappy.Decode(nil, compressed)
 	if err != nil {
-		return nil, errors.Wrap(err, "error reading response")
+		return nil, fmt.Errorf("error reading response: %w", err)
 	}
 
 	var resp prompb.ReadResponse
 	err = proto.Unmarshal(uncompressed, &resp)
 	if err != nil {
-		return nil, errors.Wrap(err, "unable to unmarshal response body")
+		return nil, fmt.Errorf("unable to unmarshal response body: %w", err)
 	}
 
 	if len(resp.Results) != len(req.Queries) {
-		return nil, errors.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
+		return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
 	}
 
 	return resp.Results[0], nil

@@ -15,6 +15,7 @@ package remote
 
 import (
 	"context"
+	"errors"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -22,7 +23,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/pkg/errors"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
@@ -14,6 +14,7 @@
 package remote
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -22,7 +23,6 @@ import (
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/model/exemplar"
@@ -182,7 +182,7 @@ func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.R
 			return resType, nil
 		}
 	}
-	return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported)
+	return 0, fmt.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported)
 }
 
 // StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller.
@@ -216,7 +216,7 @@ func StreamChunkedReadResponses(
 			chk := iter.At()
 
 			if chk.Chunk == nil {
-				return ss.Warnings(), errors.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref)
+				return ss.Warnings(), fmt.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref)
 			}
 
 			// Cut the chunk.
@@ -241,11 +241,11 @@ func StreamChunkedReadResponses(
 				QueryIndex: queryIndex,
 			})
 			if err != nil {
-				return ss.Warnings(), errors.Wrap(err, "marshal ChunkedReadResponse")
+				return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err)
 			}
 
 			if _, err := stream.Write(b); err != nil {
-				return ss.Warnings(), errors.Wrap(err, "write to stream")
+				return ss.Warnings(), fmt.Errorf("write to stream: %w", err)
 			}
 			chks = chks[:0]
 		}
@@ -425,16 +425,16 @@ func (c *concreteSeriesIterator) Err() error {
 func validateLabelsAndMetricName(ls labels.Labels) error {
 	for i, l := range ls {
 		if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
-			return errors.Errorf("invalid metric name: %v", l.Value)
+			return fmt.Errorf("invalid metric name: %v", l.Value)
 		}
 		if !model.LabelName(l.Name).IsValid() {
-			return errors.Errorf("invalid label name: %v", l.Name)
+			return fmt.Errorf("invalid label name: %v", l.Name)
 		}
 		if !model.LabelValue(l.Value).IsValid() {
-			return errors.Errorf("invalid label value: %v", l.Value)
+			return fmt.Errorf("invalid label value: %v", l.Value)
 		}
 		if i > 0 && l.Name == ls[i-1].Name {
-			return errors.Errorf("duplicate label with name: %v", l.Name)
+			return fmt.Errorf("duplicate label with name: %v", l.Name)
 		}
 	}
 	return nil
@@ -15,11 +15,11 @@ package remote
 
 import (
 	"context"
+	"errors"
 	"time"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 
 	"github.com/prometheus/prometheus/scrape"

@@ -15,10 +15,10 @@ package remote
 
 import (
 	"context"
+	"errors"
 	"testing"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
@@ -17,7 +17,6 @@ import (
 	"context"
 	"fmt"
 	"math"
-	"net/url"
 	"os"
 	"runtime/pprof"
 	"sort"
@@ -32,7 +31,6 @@ import (
 	"github.com/golang/snappy"
 	"github.com/prometheus/client_golang/prometheus"
 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
-	common_config "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
@@ -84,20 +82,15 @@ func TestSampleDelivery(t *testing.T) {
 	queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
 	queueConfig.MaxShards = 1
 
-	writeConfig := config.DefaultRemoteWriteConfig
-	// We need to set URL's so that metric creation doesn't panic.
-	writeConfig.URL = &common_config.URL{
-		URL: &url.URL{
-			Host: "http://test-storage.com",
-		},
-	}
+	writeConfig := baseRemoteWriteConfig("http://test-storage.com")
 	writeConfig.QueueConfig = queueConfig
 	writeConfig.SendExemplars = true
 
 	conf := &config.Config{
 		GlobalConfig: config.DefaultGlobalConfig,
 		RemoteWriteConfigs: []*config.RemoteWriteConfig{
-			&writeConfig,
+			writeConfig,
 		},
 	}
 
@@ -15,8 +15,8 @@ package remote
 
 import (
 	"context"
-
-	"github.com/pkg/errors"
+	"errors"
+	"fmt"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
@@ -164,12 +164,12 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers .
 	m, added := q.addExternalLabels(matchers)
 	query, err := ToQuery(q.mint, q.maxt, m, hints)
 	if err != nil {
-		return storage.ErrSeriesSet(errors.Wrap(err, "toQuery"))
+		return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err))
 	}
 
 	res, err := q.client.Read(q.ctx, query)
 	if err != nil {
-		return storage.ErrSeriesSet(errors.Wrap(err, "remote_read"))
+		return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err))
 	}
 	return newSeriesSetFilter(FromQueryResult(sortSeries, res), added)
 }
@@ -15,11 +15,11 @@ package remote
 
 import (
 	"context"
+	"fmt"
 	"net/url"
 	"sort"
 	"testing"
 
-	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
 	"github.com/stretchr/testify/require"
@@ -209,7 +209,7 @@ type mockedRemoteClient struct {
 
 func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
 	if c.got != nil {
-		return nil, errors.Errorf("expected only one call to remote client got: %v", query)
+		return nil, fmt.Errorf("expected only one call to remote client got: %v", query)
 	}
 	c.got = query
 
@@ -31,21 +31,11 @@ func TestStorageLifecycle(t *testing.T) {
 	conf := &config.Config{
 		GlobalConfig: config.DefaultGlobalConfig,
 		RemoteWriteConfigs: []*config.RemoteWriteConfig{
-			&config.DefaultRemoteWriteConfig,
+			// We need to set URL's so that metric creation doesn't panic.
+			baseRemoteWriteConfig("http://test-storage.com"),
 		},
 		RemoteReadConfigs: []*config.RemoteReadConfig{
-			&config.DefaultRemoteReadConfig,
-		},
-	}
-	// We need to set URL's so that metric creation doesn't panic.
-	conf.RemoteWriteConfigs[0].URL = &common_config.URL{
-		URL: &url.URL{
-			Host: "http://test-storage.com",
-		},
-	}
-	conf.RemoteReadConfigs[0].URL = &common_config.URL{
-		URL: &url.URL{
-			Host: "http://test-storage.com",
+			baseRemoteReadConfig("http://test-storage.com"),
 		},
 	}
 
@@ -73,7 +63,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
 	require.Equal(t, 0, len(s.queryables))
 
 	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
-		&config.DefaultRemoteReadConfig,
+		baseRemoteReadConfig("http://test-storage.com"),
 	}
 	require.NoError(t, s.ApplyConfig(conf))
 	require.Equal(t, 1, len(s.queryables))
@@ -96,7 +86,7 @@ func TestFilterExternalLabels(t *testing.T) {
 	require.Equal(t, 0, len(s.queryables))
 
 	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
-		&config.DefaultRemoteReadConfig,
+		baseRemoteReadConfig("http://test-storage.com"),
 	}
 
 	require.NoError(t, s.ApplyConfig(conf))
@@ -121,7 +111,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
 	require.Equal(t, 0, len(s.queryables))
 
 	conf.RemoteReadConfigs = []*config.RemoteReadConfig{
-		&config.DefaultRemoteReadConfig,
+		baseRemoteReadConfig("http://test-storage.com"),
 	}
 
 	conf.RemoteReadConfigs[0].FilterExternalLabels = false
@@ -133,3 +123,27 @@ func TestIgnoreExternalLabels(t *testing.T) {
 	err := s.Close()
 	require.NoError(t, err)
 }
+
+// baseRemoteWriteConfig copies values from the global default write config
+// to avoid changing global state and cross-impacting test execution.
+func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig {
+	cfg := config.DefaultRemoteWriteConfig
+	cfg.URL = &common_config.URL{
+		URL: &url.URL{
+			Host: host,
+		},
+	}
+	return &cfg
+}
+
+// baseRemoteReadConfig copies values from the global default read config
+// to avoid changing global state and cross-impacting test execution.
+func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
+	cfg := config.DefaultRemoteReadConfig
+	cfg.URL = &common_config.URL{
+		URL: &url.URL{
+			Host: host,
+		},
+	}
+	return &cfg
+}
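The two helpers above lean on Go's struct assignment semantics: copying config.DefaultRemoteWriteConfig into a local variable yields an independent value, so setting the copy's URL cannot mutate the package-level default that other tests share. A standalone illustration of the pattern (types reduced to the essentials):

package main

import "fmt"

type RemoteWriteConfig struct {
	URL  string
	Name string
}

// Package-level default, shared by every caller.
var DefaultRemoteWriteConfig = RemoteWriteConfig{Name: "default"}

func baseConfig(url string) *RemoteWriteConfig {
	cfg := DefaultRemoteWriteConfig // value copy, not an alias of the global
	cfg.URL = url
	return &cfg
}

func main() {
	a := baseConfig("http://a.example")
	b := baseConfig("http://b.example")
	fmt.Println(a.URL, b.URL)                 // http://a.example http://b.example
	fmt.Println(DefaultRemoteWriteConfig.URL) // still empty: the global was never touched
}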
@@ -15,12 +15,12 @@ package remote
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/pkg/errors"
 
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/prompb"
@@ -67,10 +67,11 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 // checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
 func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
-	switch errors.Cause(err) {
-	case storage.ErrNotFound:
+	unwrapedErr := errors.Unwrap(err)
+	switch {
+	case errors.Is(unwrapedErr, storage.ErrNotFound):
 		return storage.ErrNotFound
-	case storage.ErrOutOfOrderExemplar:
+	case errors.Is(unwrapedErr, storage.ErrOutOfOrderExemplar):
 		*outOfOrderErrs++
 		level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
 		return nil
@@ -97,8 +98,8 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 		for _, s := range ts.Samples {
 			_, err = app.Append(0, labels, s.Timestamp, s.Value)
 			if err != nil {
-				switch errors.Cause(err) {
-				case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
+				unwrapedErr := errors.Unwrap(err)
+				if errors.Is(unwrapedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrapedErr, storage.ErrOutOfBounds) || errors.Is(unwrapedErr, storage.ErrDuplicateSampleForTimestamp) {
 					level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
 				}
 				return err
@@ -202,7 +202,7 @@ func TestWriteStorageLifecycle(t *testing.T) {
 	conf := &config.Config{
 		GlobalConfig: config.DefaultGlobalConfig,
 		RemoteWriteConfigs: []*config.RemoteWriteConfig{
-			&config.DefaultRemoteWriteConfig,
+			baseRemoteWriteConfig("http://test-storage.com"),
 		},
 	}
 	require.NoError(t, s.ApplyConfig(conf))
@@ -249,18 +249,7 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
 	conf := &config.Config{
 		GlobalConfig: config.GlobalConfig{},
 		RemoteWriteConfigs: []*config.RemoteWriteConfig{
-			{
-				RemoteTimeout:    config.DefaultRemoteWriteConfig.RemoteTimeout,
-				QueueConfig:      config.DefaultRemoteWriteConfig.QueueConfig,
-				MetadataConfig:   config.DefaultRemoteWriteConfig.MetadataConfig,
-				HTTPClientConfig: config.DefaultRemoteWriteConfig.HTTPClientConfig,
-				// We need to set URL's so that metric creation doesn't panic.
-				URL: &common_config.URL{
-					URL: &url.URL{
-						Host: "http://test-storage.com",
-					},
-				},
-			},
+			baseRemoteWriteConfig("http://test-storage.com"),
 		},
 	}
 	hash, err := toHash(conf.RemoteWriteConfigs[0])
|
|||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again.
|
||||
chunkRefMapShrinkThreshold = 1000
|
||||
|
||||
// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
|
||||
chunkRefMapMinShrinkInterval = 10 * time.Minute
|
||||
|
||||
// Maximum size of segment used by job queue (number of elements). With chunkWriteJob being 64 bytes,
|
||||
// this will use ~512 KiB for empty queue.
|
||||
maxChunkQueueSegmentSize = 8192
|
||||
)
|
||||
|
||||
type chunkWriteJob struct {
|
||||
cutFile bool
|
||||
seriesRef HeadSeriesRef
|
||||
|
@ -36,23 +49,30 @@ type chunkWriteJob struct {
|
|||
// Chunks that shall be written get added to the queue, which is consumed asynchronously.
|
||||
// Adding jobs to the queue is non-blocking as long as the queue isn't full.
|
||||
type chunkWriteQueue struct {
|
||||
jobs chan chunkWriteJob
|
||||
jobs *writeJobQueue
|
||||
|
||||
chunkRefMapMtx sync.RWMutex
|
||||
chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk
|
||||
chunkRefMapMtx sync.RWMutex
|
||||
chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk
|
||||
chunkRefMapPeakSize int // Largest size that chunkRefMap has grown to since the last time we shrank it.
|
||||
chunkRefMapLastShrink time.Time // When the chunkRefMap has been shrunk the last time.
|
||||
|
||||
isRunningMtx sync.Mutex // Protects the isRunning property.
|
||||
isRunning bool // Used to prevent that new jobs get added to the queue when the chan is already closed.
|
||||
// isRunningMtx serves two purposes:
|
||||
// 1. It protects isRunning field.
|
||||
// 2. It serializes adding of jobs to the chunkRefMap in addJob() method. If jobs channel is full then addJob() will block
|
||||
// while holding this mutex, which guarantees that chunkRefMap won't ever grow beyond the queue size + 1.
|
||||
isRunningMtx sync.Mutex
|
||||
isRunning bool // Used to prevent that new jobs get added to the queue when the chan is already closed.
|
||||
|
||||
workerWg sync.WaitGroup
|
||||
|
||||
writeChunk writeChunkF
|
||||
|
||||
// Keeping three separate counters instead of only a single CounterVec to improve the performance of the critical
|
||||
// Keeping separate counters instead of only a single CounterVec to improve the performance of the critical
|
||||
// addJob() method which otherwise would need to perform a WithLabelValues call on the CounterVec.
|
||||
adds prometheus.Counter
|
||||
gets prometheus.Counter
|
||||
completed prometheus.Counter
|
||||
shrink prometheus.Counter
|
||||
}
|
||||
|
||||
// writeChunkF is a function which writes chunks, it is dynamic to allow mocking in tests.
|
||||
|
@ -67,14 +87,21 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu
|
|||
[]string{"operation"},
|
||||
)
|
||||
|
||||
segmentSize := size
|
||||
if segmentSize > maxChunkQueueSegmentSize {
|
||||
segmentSize = maxChunkQueueSegmentSize
|
||||
}
|
||||
|
||||
q := &chunkWriteQueue{
|
||||
jobs: make(chan chunkWriteJob, size),
|
||||
chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk, size),
|
||||
writeChunk: writeChunk,
|
||||
jobs: newWriteJobQueue(size, segmentSize),
|
||||
chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk),
|
||||
chunkRefMapLastShrink: time.Now(),
|
||||
writeChunk: writeChunk,
|
||||
|
||||
adds: counters.WithLabelValues("add"),
|
||||
gets: counters.WithLabelValues("get"),
|
||||
completed: counters.WithLabelValues("complete"),
|
||||
shrink: counters.WithLabelValues("shrink"),
|
||||
}
|
||||
|
||||
if reg != nil {
|
||||
|
@ -90,7 +117,12 @@ func (c *chunkWriteQueue) start() {
|
|||
go func() {
|
||||
defer c.workerWg.Done()
|
||||
|
||||
for job := range c.jobs {
|
||||
for {
|
||||
job, ok := c.jobs.pop()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
c.processJob(job)
|
||||
}
|
||||
}()
|
||||
|
@ -112,6 +144,42 @@ func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
|
|||
delete(c.chunkRefMap, job.ref)
|
||||
|
||||
c.completed.Inc()
|
||||
|
||||
c.shrinkChunkRefMap()
|
||||
}
|
||||
|
||||
// shrinkChunkRefMap checks whether the conditions to shrink the chunkRefMap are met,
|
||||
// if so chunkRefMap is reinitialized. The chunkRefMapMtx must be held when calling this method.
|
||||
//
|
||||
// We do this because Go runtime doesn't release internal memory used by map after map has been emptied.
|
||||
// To achieve that we create new map instead and throw the old one away.
|
||||
func (c *chunkWriteQueue) shrinkChunkRefMap() {
|
||||
if len(c.chunkRefMap) > 0 {
|
||||
// Can't shrink it while there is data in it.
|
||||
return
|
||||
}
|
||||
|
||||
if c.chunkRefMapPeakSize < chunkRefMapShrinkThreshold {
|
||||
// Not shrinking it because it has not grown to the minimum threshold yet.
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
if now.Sub(c.chunkRefMapLastShrink) < chunkRefMapMinShrinkInterval {
|
||||
// Not shrinking it because the minimum duration between shrink-events has not passed yet.
|
||||
return
|
||||
}
|
||||
|
||||
// Re-initialize the chunk ref map to half of the peak size that it has grown to since the last re-init event.
|
||||
// We are trying to hit the sweet spot in the trade-off between initializing it to a very small size
|
||||
// potentially resulting in many allocations to re-grow it, and initializing it to a large size potentially
|
||||
// resulting in unused allocated memory.
|
||||
c.chunkRefMap = make(map[ChunkDiskMapperRef]chunkenc.Chunk, c.chunkRefMapPeakSize/2)
|
||||
|
||||
c.chunkRefMapPeakSize = 0
|
||||
c.chunkRefMapLastShrink = now
|
||||
c.shrink.Inc()
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) {
|
||||
|
@ -125,14 +193,25 @@ func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) {
|
|||
defer c.isRunningMtx.Unlock()
|
||||
|
||||
if !c.isRunning {
|
||||
return errors.New("queue is not started")
|
||||
return errors.New("queue is not running")
|
||||
}
|
||||
|
||||
c.chunkRefMapMtx.Lock()
|
||||
c.chunkRefMap[job.ref] = job.chk
|
||||
|
||||
// Keep track of the peak usage of c.chunkRefMap.
|
||||
if len(c.chunkRefMap) > c.chunkRefMapPeakSize {
|
||||
c.chunkRefMapPeakSize = len(c.chunkRefMap)
|
||||
}
|
||||
c.chunkRefMapMtx.Unlock()
|
||||
|
||||
c.jobs <- job
|
||||
if ok := c.jobs.push(job); !ok {
|
||||
c.chunkRefMapMtx.Lock()
|
||||
delete(c.chunkRefMap, job.ref)
|
||||
c.chunkRefMapMtx.Unlock()
|
||||
|
||||
return errors.New("queue is closed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -159,7 +238,7 @@ func (c *chunkWriteQueue) stop() {
|
|||
|
||||
c.isRunning = false
|
||||
|
||||
close(c.jobs)
|
||||
c.jobs.close()
|
||||
|
||||
c.workerWg.Wait()
|
||||
}
|
||||
|
@ -171,7 +250,7 @@ func (c *chunkWriteQueue) queueIsEmpty() bool {
|
|||
func (c *chunkWriteQueue) queueIsFull() bool {
|
||||
// When the queue is full and blocked on the writer the chunkRefMap has one more job than the cap of the jobCh
|
||||
// because one job is currently being processed and blocked in the writer.
|
||||
return c.queueSize() == cap(c.jobs)+1
|
||||
return c.queueSize() == c.jobs.maxSize+1
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) queueSize() int {
|
||||
|
|
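The shrink logic above works around the Go runtime keeping a map's internal buckets allocated after all keys are deleted; the only way to give the memory back is to allocate a fresh map. A minimal standalone sketch of the same idea (the thresholds and types here are illustrative):

package main

import (
	"fmt"
	"time"
)

const (
	shrinkThreshold   = 1000
	minShrinkInterval = 10 * time.Minute
)

type refMap struct {
	m          map[uint64]string
	peakSize   int
	lastShrink time.Time
}

func (r *refMap) set(k uint64, v string) {
	r.m[k] = v
	if len(r.m) > r.peakSize {
		r.peakSize = len(r.m) // track peak usage between shrinks
	}
}

// shrink replaces the emptied map with a smaller one, since deleting keys
// alone never releases the buckets Go allocated for the old map.
func (r *refMap) shrink() {
	if len(r.m) > 0 || r.peakSize < shrinkThreshold || time.Since(r.lastShrink) < minShrinkInterval {
		return
	}
	r.m = make(map[uint64]string, r.peakSize/2) // half the peak: regrow cost vs. idle memory
	r.peakSize = 0
	r.lastShrink = time.Now()
}

func main() {
	r := &refMap{m: map[uint64]string{}, lastShrink: time.Now().Add(-time.Hour)}
	for i := uint64(0); i < 2000; i++ {
		r.set(i, "chunk")
	}
	for i := uint64(0); i < 2000; i++ {
		delete(r.m, i)
	}
	r.shrink()
	fmt.Println(len(r.m), r.peakSize) // 0 0
}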
141
tsdb/chunks/queue.go
Normal file
141
tsdb/chunks/queue.go
Normal file
|
@ -0,0 +1,141 @@
|
|||
// Copyright 2022 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package chunks
|
||||
|
||||
import "sync"
|
||||
|
||||
// writeJobQueue is similar to buffered channel of chunkWriteJob, but manages its own buffers
|
||||
// to avoid using a lot of memory when it's empty. It does that by storing elements into segments
|
||||
// of equal size (segmentSize). When segment is not used anymore, reference to it are removed,
|
||||
// so it can be treated as a garbage.
|
||||
type writeJobQueue struct {
|
||||
maxSize int
|
||||
segmentSize int
|
||||
|
||||
	mtx sync.Mutex // protects all following variables

	pushed, popped *sync.Cond // signalled when something is pushed into the queue or popped from it

	first, last *writeJobQueueSegment // pointer to first and last segment, if any

	size int // total size of the queue

	closed bool // after closing the queue, nothing can be pushed to it
}

type writeJobQueueSegment struct {
	segment             []chunkWriteJob
	nextRead, nextWrite int                   // index of next read and next write in this segment.
	nextSegment         *writeJobQueueSegment // next segment, if any
}

func newWriteJobQueue(maxSize, segmentSize int) *writeJobQueue {
	if maxSize <= 0 || segmentSize <= 0 {
		panic("invalid queue")
	}

	q := &writeJobQueue{
		maxSize:     maxSize,
		segmentSize: segmentSize,
	}

	q.pushed = sync.NewCond(&q.mtx)
	q.popped = sync.NewCond(&q.mtx)
	return q
}

func (q *writeJobQueue) close() {
	q.mtx.Lock()
	defer q.mtx.Unlock()

	q.closed = true

	// Unblock all blocked goroutines.
	q.pushed.Broadcast()
	q.popped.Broadcast()
}

// push blocks until there is space available in the queue, and then adds the job to the queue.
// If the queue is closed or gets closed while waiting for space, push returns false.
func (q *writeJobQueue) push(job chunkWriteJob) bool {
	q.mtx.Lock()
	defer q.mtx.Unlock()

	// Wait until the queue has more space or is closed.
	for !q.closed && q.size >= q.maxSize {
		q.popped.Wait()
	}

	if q.closed {
		return false
	}

	// Check if this segment has more space for writing, and create a new one if not.
	if q.last == nil || q.last.nextWrite >= q.segmentSize {
		prevLast := q.last
		q.last = &writeJobQueueSegment{
			segment: make([]chunkWriteJob, q.segmentSize),
		}

		if prevLast != nil {
			prevLast.nextSegment = q.last
		}
		if q.first == nil {
			q.first = q.last
		}
	}

	q.last.segment[q.last.nextWrite] = job
	q.last.nextWrite++
	q.size++
	q.pushed.Signal()
	return true
}

// pop returns the first job from the queue, and true.
// If the queue is empty, pop blocks until there is a job (returns true), or until the queue is closed (returns false).
// If the queue was already closed, pop first returns all remaining elements from the queue (with true), and only then returns false.
func (q *writeJobQueue) pop() (chunkWriteJob, bool) {
	q.mtx.Lock()
	defer q.mtx.Unlock()

	// Wait until something is pushed to the queue, or the queue is closed.
	for q.size == 0 {
		if q.closed {
			return chunkWriteJob{}, false
		}

		q.pushed.Wait()
	}

	res := q.first.segment[q.first.nextRead]
	q.first.segment[q.first.nextRead] = chunkWriteJob{} // Clear the just-read element.
	q.first.nextRead++
	q.size--

	// If we have read all possible elements from the first segment, we can drop it.
	if q.first.nextRead >= q.segmentSize {
		q.first = q.first.nextSegment
		if q.first == nil {
			q.last = nil
		}
	}

	q.popped.Signal()
	return res, true
}

// length returns the number of all jobs in the queue.
func (q *writeJobQueue) length() int {
	q.mtx.Lock()
	defer q.mtx.Unlock()

	return q.size
}
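For orientation, here is a minimal sketch of how a producer and a consumer would use this queue. It is not part of the diff; it assumes we are inside package `chunks`, where `writeJobQueue`, `chunkWriteJob` and `HeadSeriesRef` are defined, and only the `seriesRef` field of `chunkWriteJob` is shown.

```go
// A minimal usage sketch (not from the diff): one producer, one consumer.
func exampleQueueUsage() {
	q := newWriteJobQueue(1024, 64) // at most 1024 queued jobs, 64 per segment

	done := make(chan struct{})
	go func() {
		defer close(done)
		// pop blocks until a job is available; after close() it first
		// drains the remaining jobs and only then returns false.
		for job, ok := q.pop(); ok; job, ok = q.pop() {
			_ = job // here: write the chunk referenced by the job
		}
	}()

	// push blocks while the queue is full; it returns false only if the
	// queue was closed before the job could be enqueued.
	if !q.push(chunkWriteJob{seriesRef: HeadSeriesRef(1)}) {
		return // queue already closed
	}

	q.close() // reject further pushes; the consumer drains and exits
	<-done
}
```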
323
tsdb/chunks/queue_test.go
Normal file

@@ -0,0 +1,323 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
)

func (q *writeJobQueue) assertInvariants(t *testing.T) {
	q.mtx.Lock()
	defer q.mtx.Unlock()

	totalSize := 0
	for s := q.first; s != nil; s = s.nextSegment {
		require.True(t, s.segment != nil)

		// Next read index is lower than or equal to the next write index (we cannot read past written jobs).
		require.True(t, s.nextRead <= s.nextWrite)

		// Number of unread elements in this segment.
		totalSize += s.nextWrite - s.nextRead

		// First segment can be partially read, other segments were not read yet.
		if s == q.first {
			require.True(t, s.nextRead >= 0)
		} else {
			require.True(t, s.nextRead == 0)
		}

		// If the first segment is empty (everything was read from it already), it must have extra capacity for
		// additional elements, otherwise it would have been removed.
		if s == q.first && s.nextRead == s.nextWrite {
			require.True(t, s.nextWrite < len(s.segment))
		}

		// Segments in the middle are full.
		if s != q.first && s != q.last {
			require.True(t, s.nextWrite == len(s.segment))
		}
		// Last segment must have at least one element, or we wouldn't have created it.
		require.True(t, s.nextWrite > 0)
	}

	require.Equal(t, q.size, totalSize)
}

func TestQueuePushPopSingleGoroutine(t *testing.T) {
	seed := time.Now().UnixNano()
	t.Log("seed:", seed)
	r := rand.New(rand.NewSource(seed))

	const maxSize = 500
	const maxIters = 50

	for max := 1; max < maxSize; max++ {
		queue := newWriteJobQueue(max, 1+(r.Int()%max))

		elements := 0 // total elements in the queue
		lastWriteID := 0
		lastReadID := 0

		for iter := 0; iter < maxIters; iter++ {
			if elements < max {
				toWrite := r.Int() % (max - elements)
				if toWrite == 0 {
					toWrite = 1
				}

				for i := 0; i < toWrite; i++ {
					lastWriteID++
					require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(lastWriteID)}))

					elements++
				}
			}

			if elements > 0 {
				toRead := r.Int() % elements
				if toRead == 0 {
					toRead = 1
				}

				for i := 0; i < toRead; i++ {
					lastReadID++

					j, b := queue.pop()
					require.True(t, b)
					require.Equal(t, HeadSeriesRef(lastReadID), j.seriesRef)

					elements--
				}
			}

			require.Equal(t, elements, queue.length())
			queue.assertInvariants(t)
		}
	}
}

func TestQueuePushBlocksOnFullQueue(t *testing.T) {
	queue := newWriteJobQueue(5, 5)

	pushTime := make(chan time.Time)
	go func() {
		require.True(t, queue.push(chunkWriteJob{seriesRef: 1}))
		require.True(t, queue.push(chunkWriteJob{seriesRef: 2}))
		require.True(t, queue.push(chunkWriteJob{seriesRef: 3}))
		require.True(t, queue.push(chunkWriteJob{seriesRef: 4}))
		require.True(t, queue.push(chunkWriteJob{seriesRef: 5}))
		pushTime <- time.Now()
		// This will block.
		require.True(t, queue.push(chunkWriteJob{seriesRef: 6}))
		pushTime <- time.Now()
	}()

	timeBeforePush := <-pushTime

	delay := 100 * time.Millisecond
	select {
	case <-time.After(delay):
		// ok
	case <-pushTime:
		require.Fail(t, "didn't expect another push to proceed")
	}

	popTime := time.Now()
	j, b := queue.pop()
	require.True(t, b)
	require.Equal(t, HeadSeriesRef(1), j.seriesRef)

	timeAfterPush := <-pushTime

	require.GreaterOrEqual(t, timeAfterPush.Sub(popTime), time.Duration(0))
	require.GreaterOrEqual(t, timeAfterPush.Sub(timeBeforePush), delay)
}

func TestQueuePopBlocksOnEmptyQueue(t *testing.T) {
	queue := newWriteJobQueue(5, 5)

	popTime := make(chan time.Time)
	go func() {
		j, b := queue.pop()
		require.True(t, b)
		require.Equal(t, HeadSeriesRef(1), j.seriesRef)

		popTime <- time.Now()

		// This will block.
		j, b = queue.pop()
		require.True(t, b)
		require.Equal(t, HeadSeriesRef(2), j.seriesRef)

		popTime <- time.Now()
	}()

	queue.push(chunkWriteJob{seriesRef: 1})

	timeBeforePop := <-popTime

	delay := 100 * time.Millisecond
	select {
	case <-time.After(delay):
		// ok
	case <-popTime:
		require.Fail(t, "didn't expect another pop to proceed")
	}

	pushTime := time.Now()
	require.True(t, queue.push(chunkWriteJob{seriesRef: 2}))

	timeAfterPop := <-popTime

	require.GreaterOrEqual(t, timeAfterPop.Sub(pushTime), time.Duration(0))
	require.Greater(t, timeAfterPop.Sub(timeBeforePop), delay)
}

func TestQueuePopUnblocksOnClose(t *testing.T) {
	queue := newWriteJobQueue(5, 5)

	popTime := make(chan time.Time)
	go func() {
		j, b := queue.pop()
		require.True(t, b)
		require.Equal(t, HeadSeriesRef(1), j.seriesRef)

		popTime <- time.Now()

		// This will block until the queue is closed.
		j, b = queue.pop()
		require.False(t, b)

		popTime <- time.Now()
	}()

	queue.push(chunkWriteJob{seriesRef: 1})

	timeBeforePop := <-popTime

	delay := 100 * time.Millisecond
	select {
	case <-time.After(delay):
		// ok
	case <-popTime:
		require.Fail(t, "didn't expect another pop to proceed")
	}

	closeTime := time.Now()
	queue.close()

	timeAfterPop := <-popTime

	require.GreaterOrEqual(t, timeAfterPop.Sub(closeTime), time.Duration(0))
	require.GreaterOrEqual(t, timeAfterPop.Sub(timeBeforePop), delay)
}

func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) {
	const count = 10

	queue := newWriteJobQueue(count, count)

	for i := 0; i < count; i++ {
		require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(i)}))
	}

	// Close the queue before popping all elements.
	queue.close()

	// No more pushing allowed after close.
	require.False(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(11111)}))

	// Verify that we can still read all pushed elements.
	for i := 0; i < count; i++ {
		j, b := queue.pop()
		require.True(t, b)
		require.Equal(t, HeadSeriesRef(i), j.seriesRef)
	}

	_, b := queue.pop()
	require.False(t, b)
}

func TestQueuePushPopManyGoroutines(t *testing.T) {
	const readGoroutines = 5
	const writeGoroutines = 10
	const writes = 500

	queue := newWriteJobQueue(1024, 64)

	// Reading goroutines.
	refsMx := sync.Mutex{}
	refs := map[HeadSeriesRef]bool{}

	readersWG := sync.WaitGroup{}
	for i := 0; i < readGoroutines; i++ {
		readersWG.Add(1)

		go func() {
			defer readersWG.Done()

			for j, ok := queue.pop(); ok; j, ok = queue.pop() {
				refsMx.Lock()
				refs[j.seriesRef] = true
				refsMx.Unlock()
			}
		}()
	}

	id := atomic.Uint64{}

	writersWG := sync.WaitGroup{}
	for i := 0; i < writeGoroutines; i++ {
		writersWG.Add(1)

		go func() {
			defer writersWG.Done()

			for i := 0; i < writes; i++ {
				ref := id.Inc()

				require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)}))
			}
		}()
	}

	// Wait until all writes are done.
	writersWG.Wait()

	// Close the queue and wait for reading to be done.
	queue.close()
	readersWG.Wait()

	// Check if we have all expected values.
	require.Equal(t, writeGoroutines*writes, len(refs))
}

func TestQueueSegmentIsKeptEvenIfEmpty(t *testing.T) {
	queue := newWriteJobQueue(1024, 64)

	require.True(t, queue.push(chunkWriteJob{seriesRef: 1}))
	_, b := queue.pop()
	require.True(t, b)

	require.NotNil(t, queue.first)
	require.Equal(t, 1, queue.first.nextRead)
	require.Equal(t, 1, queue.first.nextWrite)
}
@@ -888,7 +888,9 @@ func (db *DB) Compact() (returnErr error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()
	defer func() {
		if returnErr != nil {
		if returnErr != nil && !errors.Is(returnErr, context.Canceled) {
			// If we got an error because the context was canceled, then we're most likely
			// shutting down TSDB and we don't need to report this in metrics.
			db.metrics.compactionsFailed.Inc()
		}
	}()
@@ -646,6 +646,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
	if !ok {
		slice := mmappedChunks[seriesRef]
		if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint {
			h.metrics.mmapChunkCorruptionTotal.Inc()
			return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
				seriesRef, slice[len(slice)-1].minTime, slice[len(slice)-1].maxTime, mint, maxt)
		}

@@ -660,6 +661,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
	}

	if len(ms.mmappedChunks) > 0 && ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime >= mint {
		h.metrics.mmapChunkCorruptionTotal.Inc()
		return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]",
			seriesRef, ms.mmappedChunks[len(ms.mmappedChunks)-1].minTime, ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime,
			mint, maxt)

@@ -1123,6 +1125,10 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
	var stones []tombstones.Stone
	for p.Next() {
		series := h.series.getByID(chunks.HeadSeriesRef(p.At()))
		if series == nil {
			level.Debug(h.logger).Log("msg", "Series not found in Head.Delete")
			continue
		}

		series.RLock()
		t0, t1 := series.minTime(), series.maxTime()
@@ -537,10 +537,9 @@ func (wp *walSubsetProcessor) waitUntilIdle() {
	}
	wp.input <- []record.RefSample{}
	for len(wp.input) != 0 {
		time.Sleep(10 * time.Microsecond)
		select {
		case <-wp.output: // Allow output side to drain to avoid deadlock.
		default:
		case <-time.After(10 * time.Microsecond):
		}
		select {
		case <-wp.histogramsOutput: // Allow output side to drain to avoid deadlock.
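The hunk above swaps an unconditional `time.Sleep` for a `select` that either drains an output channel or waits briefly, so the goroutine filling `wp.output` can never stay blocked on a full channel while `waitUntilIdle` spins. Below is a self-contained sketch of that drain-or-timeout pattern, with generic channel types and hypothetical names, not Prometheus code:

```go
package main

import (
	"fmt"
	"time"
)

// waitUntilEmpty spins until input is drained, but on every iteration it
// also offers to drain output, so the worker on the other side cannot be
// stuck on a full output channel (which would deadlock both sides).
func waitUntilEmpty(input <-chan int, output <-chan int) {
	for len(input) != 0 {
		select {
		case v := <-output: // allow the output side to drain to avoid deadlock
			fmt.Println("drained:", v)
		case <-time.After(10 * time.Microsecond): // otherwise wait briefly and re-check
		}
	}
}

func main() {
	in := make(chan int, 4)
	out := make(chan int, 1)

	in <- 1
	out <- 42 // worker output waiting to be consumed

	go func() {
		<-in // the worker eventually consumes the input
	}()

	waitUntilEmpty(in, out)
	fmt.Println("idle")
}
```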
@@ -45,6 +45,8 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
		}
	}()

	sampleCount := 0
	const commitAfter = 10000
	ctx := context.Background()
	app := w.Appender(ctx)

@@ -59,10 +61,19 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
			if err != nil {
				return "", err
			}
			sampleCount++
		}
		if it.Err() != nil {
			return "", it.Err()
		}
		// Commit and make a new appender periodically, to avoid building up data in memory.
		if sampleCount > commitAfter {
			if err = app.Commit(); err != nil {
				return "", err
			}
			app = w.Appender(ctx)
			sampleCount = 0
		}
	}

	if err = app.Commit(); err != nil {
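The comment in the hunk names the technique: commit every `commitAfter` samples and open a fresh appender, so uncommitted samples never pile up in memory. Here is a toy, self-contained sketch of the same batching idea; the `batcher` type is hypothetical and merely stands in for the TSDB appender:

```go
package main

import "fmt"

// batcher stands in for an appender: it accumulates pending samples until
// Commit flushes them, releasing the memory they occupied.
type batcher struct {
	pending []int
}

func (b *batcher) Commit() error {
	fmt.Println("committed", len(b.pending), "samples")
	b.pending = b.pending[:0] // committed data is no longer held in memory
	return nil
}

func main() {
	const commitAfter = 10000
	b := &batcher{}

	sampleCount := 0
	for i := 0; i < 25000; i++ {
		b.pending = append(b.pending, i)
		sampleCount++

		// Commit periodically so at most commitAfter samples are pending.
		if sampleCount > commitAfter {
			if err := b.Commit(); err != nil {
				panic(err)
			}
			sampleCount = 0
		}
	}
	if err := b.Commit(); err != nil { // final commit for the tail
		panic(err)
	}
}
```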
@@ -14,11 +14,11 @@
package logging

import (
	"fmt"
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/pkg/errors"
)

var timestampFormat = log.TimestampFormat(

@@ -40,7 +40,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) {

	f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
	if err != nil {
		return nil, errors.Wrap(err, "can't create json logger")
		return nil, fmt.Errorf("can't create json logger: %w", err)
	}

	return &JSONFileLogger{
@@ -15,6 +15,7 @@ package treecache

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"sync"

@@ -23,7 +24,6 @@ import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/go-zookeeper/zk"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)

@@ -214,10 +214,10 @@ func (tc *ZookeeperTreeCache) loop(path string) {

func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTreeCacheNode) error {
	data, _, dataWatcher, err := tc.conn.GetW(path)
	if err == zk.ErrNoNode {
	if errors.Is(err, zk.ErrNoNode) {
		tc.recursiveDelete(path, node)
		if node == tc.head {
			return errors.Errorf("path %s does not exist", path)
			return fmt.Errorf("path %s does not exist", path)
		}
		return nil
	} else if err != nil {

@@ -230,7 +230,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
	}

	children, _, childWatcher, err := tc.conn.ChildrenW(path)
	if err == zk.ErrNoNode {
	if errors.Is(err, zk.ErrNoNode) {
		tc.recursiveDelete(path, node)
		return nil
	} else if err != nil {
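Both the logging and treecache hunks above migrate from `github.com/pkg/errors` to the standard library: `fmt.Errorf` with the `%w` verb replaces `errors.Wrap`, and `errors.Is` replaces a direct `==` comparison so a sentinel error is still matched through wrapping. A small self-contained sketch of both idioms (the path and message are illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// openLog wraps the underlying error with context using %w, preserving the
// error chain (this replaces errors.Wrap from github.com/pkg/errors).
func openLog(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
	if err != nil {
		return nil, fmt.Errorf("can't create json logger: %w", err)
	}
	return f, nil
}

func main() {
	_, err := openLog("/nonexistent-dir/log.json")
	// errors.Is walks the %w chain, so the sentinel is recognized even
	// though the error was wrapped with extra context.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("underlying cause: file or directory does not exist")
	}
}
```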
1
web/ui/.nvmrc
Normal file

@@ -0,0 +1 @@
v16.14.2
3
web/ui/module/codemirror-promql/.gitignore
vendored

@@ -5,3 +5,6 @@ dist/
lib/

/.nyc_output

LICENSE
CHANGELOG.md
@@ -1,116 +0,0 @@
0.19.0 / 2021-12-20
===================

* **[Enhancement]**: Add a negative autocompletion boost to some trigonometric functions that can overlap with other more popular PromQL functions.
* **[BugFix]**: Improve checking of whether a `PrometheusConfig` object was passed to `newCompleteStrategy()`.

0.18.0 / 2021-10-20
===================

* **[Feature]**: Allow overriding the API prefix used to contact a remote Prometheus.
* **[Feature]**: Add linter and autocompletion support for trigonometric functions (like `sin`, `cos`).
* **[BreakingChange]**: The lib is now exposed under the `dist` folder. When importing `codemirror-promql`, it means you
  will need to add `dist` in the import. For example `import { newCompleteStrategy } from 'codemirror-promql/cjs/complete';`
  becomes `import { newCompleteStrategy } from 'codemirror-promql/dist/cjs/complete';`
* **[BreakingChange]**: lezer-promql has been migrated into codemirror-promql in the `grammar` folder.
* **[BreakingChange]**: Support the latest version of Codemirror.next (v0.19.0).

0.17.0 / 2021-08-10
===================

* **[Feature]**: Support `present_over_time`.
* **[Feature]**: The HTTP method used to contact Prometheus is now configurable.

0.16.0 / 2021-05-20
===================

* **[Feature]**: Support a partial PromQL language called `MetricName`. It can be used to autocomplete only the metric
  name. (#142)
* **[Feature]**: Autocomplete `NaN` and `Inf`. (#141)
* **[Enhancement]**: Fetch series using the HTTP `POST` method. (#139)
* **[Enhancement]**: Upgrade lezer-promql, which fixed the parsing of metric names starting with `Inf`/`NaN`, like "infra". (#142)
* **[BreakingChange]**: The constant `promQLLanguage` has been changed to be a function. It takes a `LanguageType` as a
  parameter. (#142)

0.15.0 / 2021-04-13
===================

* **[Feature]**: Provide a way to inject an initial metric list for the autocompletion. (#134)
* **[Enhancement]**: Autocomplete metrics/functions/aggregations when the editor is empty. (#133)
* **[Enhancement]**: Improve the documentation to reflect what the lib is providing. (#134)
* **[Change]**: Export the essential interfaces in the root index of the lib. (#132)
* **[Change]**: Downgrade the required NodeJS version (from 14 to 12). (#112)
* **[BreakingChange]**: Support CommonJS modules. (#130)

Note that this requires changing the import path if you are using something not exported by the root index of the lib. For
example: `import { labelMatchersToString } from 'codemirror-promql/parser/matcher';`
becomes `import { labelMatchersToString } from 'codemirror-promql/esm/parser/matcher';`
or `import { labelMatchersToString } from 'codemirror-promql/cjs/parser/matcher';`

0.14.1 / 2021-04-07
===================

* **[Enhancement]**: Provide getters and setters to easily manipulate the different objects exposed by the lib.
* **[BugFix]**: Fix the autocompletion of labels after a comma (in a label matcher list or in a grouping label list).

0.14.0 / 2021-03-26
===================

* **[Feature]**: Through the update of [lezer-promql](https://github.com/promlabs/lezer-promql/releases/tag/0.18.0),
  support negative offsets.
* **[Enhancement]**: Add snippets to ease the usage of the aggregations `topk`, `bottomk` and `count_values`.
* **[Enhancement]**: Autocomplete the 2nd half of the subquery time selector.

0.13.0 / 2021-03-22
===================
* **[Feature]**: Linter and autocompletion support 3 new PromQL functions: `clamp`, `last_over_time`, `sgn`.
* **[Feature]**: Linter and autocompletion support the `@` expression.
* **[Enhancement]**: The signature of `CompleteStrategy.promQL` has been updated to support the type `Promise<null>`.
* **[BreakingChange]**: Support the latest version of Codemirror.next (v0.18.0).
* **[BreakingChange]**: Remove the function `enricher`.

0.12.0 / 2021-01-12
===================

* **[Enhancement]**: Improve the parsing of `BinExpr` thanks to the changes provided by lezer-promql (v0.15.0).
* **[BreakingChange]**: Support the new version of codemirror, v0.17.x.

0.11.0 / 2020-12-08
===================

* **[Feature]**: Add completion of the keyword `bool`. (#89)
* **[Feature]**: Add a function `enricher` that can be used to enrich the completion with a custom one.
* **[Feature]**: Add an LRU caching system. (#71)
* **[Feature]**: You can now configure the maximum number of metrics in Prometheus for which metadata is fetched.
* **[Feature]**: Allow the possibility to inject a custom `CompleteStrategy`. (#83)
* **[Feature]**: Provide the matchers in the PrometheusClient for the methods `labelValues` and `series`. (#84)
* **[Feature]**: Add the method `metricName` in the PrometheusClient that supports a prefix of the searched metric. (#84)
* **[Enhancement]**: The caching mechanism and PrometheusClient have been split. (#71)
* **[Enhancement]**: Optimize the code of the PrometheusClient when no cache is used.
* **[Enhancement]**: General improvement of the code thanks to Codemirror.next v0.14.0 (for the new tree management) and v0.15.0 (for the new tags/highlight management).
* **[Enhancement]**: Improve the code coverage of the parser concerning the parsing of functions / aggregations.
* **[BugFix]**: In certain cases, the linter didn't ignore comments. (#78)
* **[BreakingChange]**: Use an object instead of a map when querying the metrics metadata.
* **[BreakingChange]**: Support the latest version of Codemirror.next (v0.15.0).
* **[BreakingChange]**: Change the way the completion configuration is structured.

0.10.2 / 2020-10-18
===================

* **[BugFix]**: Fixed missing autocompletion of binary operators after aggregations.

0.10.1 / 2020-10-16
===================

* **[Enhancement]**: Caching of series label names and values for autocompletion is now optimized to be much faster.
* **[BugFix]**: Fixed incorrect linter errors around binary operator arguments not separated from the operator by a space.

0.10.0 / 2020-10-14
===================

* **[Enhancement]**: The linter now checks many-to-many, one-to-one, many-to-one and one-to-many operations.
* **[Enhancement]**: The autocompletion now shows the type of a metric if the type is the same for every possible definition of that metric.
* **[Enhancement]**: The autocompletion supports completion of durations.
* **[Enhancement]**: Descriptions have been added for the snippets, the binary operator modifiers and the aggregation operator modifiers.
* **[Enhancement]**: Coverage of the code has been increased (a lot).
* **[BreakingChange]**: Removed LSP support.
@@ -1,27 +1,17 @@
CodeMirror-promql
=================
[![CircleCI](https://circleci.com/gh/prometheus/codemirror-promql.svg?style=shield)](https://circleci.com/gh/prometheus/codemirror-promql) [![GitHub license](https://img.shields.io/badge/license-Apache-blue.svg)](./LICENSE)
[![NPM version](https://img.shields.io/npm/v/codemirror-promql.svg)](https://www.npmjs.org/package/codemirror-promql) [![codecov](https://codecov.io/gh/prometheus/codemirror-promql/branch/main/graph/badge.svg?token=rBHsyXshfl)](https://codecov.io/gh/prometheus/codemirror-promql)

## Overview

This project provides a mode for [CodeMirror Next](https://codemirror.net/6) that handles syntax highlighting, linting
and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io/docs/introduction/overview/)).

![preview](https://user-images.githubusercontent.com/4548045/95660829-d5e4b680-0b2a-11eb-9ecb-41dca6396273.gif)

## Where does it come from?

The authoritative copy of this code lives in `prometheus/prometheus` and is synced to
`prometheus/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code
in https://github.com/prometheus/prometheus/tree/main/web/ui/module/codemirror-promql.

### Installation
## Installation

This mode is available as an npm package:

```bash
npm install --save codemirror-promql
npm install --save @prometheus-io/codemirror-promql
```

**Note:** You will have to manually install different packages that are part

@@ -29,14 +19,14 @@ of [CodeMirror Next](https://codemirror.net/6), as they are a peer dependency to
packages you need to install:

* **@codemirror/autocomplete**
* **@codemirror/highlight**
* **@codemirror/language**
* **@codemirror/lint**
* **@codemirror/state**
* **@codemirror/view**
* **@lezer/common**

```bash
npm install --save @codemirror/autocomplete @codemirror/highlight @codemirror/language @codemirror/lint @codemirror/state @codemirror/view
npm install --save @codemirror/autocomplete @codemirror/language @codemirror/lint @codemirror/state @codemirror/view @lezer/common
```

**Note 2**: that's the minimum required to install the lib. You will probably also need to install the dependency

@@ -57,10 +47,10 @@ If you want to enjoy about the different features provided without taking too mu
them, then the easiest way is this one:

```typescript
import { PromQLExtension } from 'codemirror-promql';
import { basicSetup } from '@codemirror/basic-setup';
import { EditorState } from '@codemirror/state';
import { EditorView } from '@codemirror/view';
import {PromQLExtension} from '@prometheus-io/codemirror-promql';
import {basicSetup} from '@codemirror/basic-setup';
import {EditorState} from '@codemirror/state';
import {EditorView} from '@codemirror/view';

const promQL = new PromQLExtension()
new EditorView({

@@ -108,7 +98,7 @@ By default, the limit is 10 000 metrics.
Use it cautiously. A high value of this limit can cause your browser to crash due to the amount of data fetched.

```typescript
const promQL = new PromQLExtension().setComplete({ maxMetricsMetadata: 10000 })
const promQL = new PromQLExtension().setComplete({maxMetricsMetadata: 10000})
```

#### Connect the autocompletion extension to a remote Prometheus server

@@ -127,7 +117,7 @@ Note: this is the only mandatory parameter in case you want to use the default P
parameter, the rest of the config will be ignored, and the Prometheus client won't be initialized.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { url: 'https://prometheus.land' } })
const promQL = new PromQLExtension().setComplete({remote: {url: 'https://prometheus.land'}})
```

###### Override FetchFn

@@ -136,7 +126,7 @@ In case your Prometheus server is protected and requires a special HTTP client,
that is used to perform any required HTTP request.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { fetchFn: myHTTPClient } })
const promQL = new PromQLExtension().setComplete({remote: {fetchFn: myHTTPClient}})
```

###### Duration to use for looking back when retrieving metrics / labels

@@ -148,7 +138,7 @@ In case you would like to provide your own duration, you can override the variab
value is `12 * 60 * 60 * 1000` (12h). The value must be defined in **milliseconds**.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { lookbackInterval: 12 * 60 * 60 * 1000 } })
const promQL = new PromQLExtension().setComplete({remote: {lookbackInterval: 12 * 60 * 60 * 1000}})
```

###### Error Handling

@@ -157,7 +147,7 @@ You can set up your own error handler to catch any HTTP error that can occur whe
Prometheus.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { httpErrorHandler: (error: any) => console.error(error) } })
const promQL = new PromQLExtension().setComplete({remote: {httpErrorHandler: (error: any) => console.error(error)}})
```

###### HTTP method used

@@ -168,17 +158,18 @@ endpoints `/api/v1/labels` and `/api/v1/series`.
You can change it to use the HTTP method `GET` if you prefer.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { httpMethod: 'GET' } })
const promQL = new PromQLExtension().setComplete({remote: {httpMethod: 'GET'}})
```

###### Override the API Prefix

The default Prometheus Client, when building the query to get data from Prometheus, is using an API prefix which is by default `/api/v1`.
The default Prometheus Client, when building the query to get data from Prometheus, uses an API prefix which is by
default `/api/v1`.

You can override this value like this:

```typescript
const promql = new PromQLExtension().setComplete({ remote: { apiPrefix: '/my/api/prefix' } })
const promql = new PromQLExtension().setComplete({remote: {apiPrefix: '/my/api/prefix'}})
```

###### Cache

@@ -192,7 +183,7 @@ The data are stored in the cache for a limited amount of time defined by the var
minutes. The value must be defined in **milliseconds**.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { cache: { maxAge: 5 * 60 * 1000 } } })
const promQL = new PromQLExtension().setComplete({remote: {cache: {maxAge: 5 * 60 * 1000}}})
```

###### Initial Metric List

@@ -226,7 +217,7 @@ interface [PrometheusClient](https://github.com/prometheus/codemirror-promql/blo
.

```typescript
const promQL = new PromQLExtension().setComplete({ remote: { prometheusClient: MyPrometheusClient } })
const promQL = new PromQLExtension().setComplete({remote: {prometheusClient: MyPrometheusClient}})
```

#### Provide your own implementation of the autocompletion

@@ -234,7 +225,7 @@ const promQL = new PromQLExtension().setComplete({ remote: { prometheusClient: M
In case you would like to provide your own implementation of the autocompletion, you can simply do it like this:

```typescript
const promQL = new PromQLExtension().setComplete({ completeStrategy: myCustomImpl })
const promQL = new PromQLExtension().setComplete({completeStrategy: myCustomImpl})
```

Note: In case this parameter is provided, the rest of the configuration is ignored.

@@ -246,4 +237,4 @@ Note: In case this parameter is provided, then the rest of the configuration is

## License

Apache License 2.0, see [LICENSE](https://github.com/prometheus/codemirror-promql/blob/main/LICENSE).
The code is licensed under an [Apache 2.0](https://github.com/prometheus/prometheus/blob/main/LICENSE) license.
@@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/codemirror-promql",
  "version": "0.19.0",
  "version": "0.37.0-rc.0",
  "description": "a CodeMirror mode for the PromQL language",
  "types": "dist/esm/index.d.ts",
  "module": "dist/esm/index.js",

@@ -29,7 +29,7 @@
  },
  "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
  "dependencies": {
    "@prometheus-io/lezer-promql": "0.23.0",
    "@prometheus-io/lezer-promql": "^0.37.0-rc.0",
    "lru-cache": "^6.0.0"
  },
  "devDependencies": {
3
web/ui/module/lezer-promql/.gitignore
vendored

@@ -3,3 +3,6 @@ dist/
lib/
src/parser.js
src/parser.terms.js

LICENSE
CHANGELOG.md

6
web/ui/module/lezer-promql/.npmignore
Normal file

@@ -0,0 +1,6 @@
build.sh
generate-types.sh
jest.config.cjs
rollup.config.js
/test/
/src/
43
web/ui/module/lezer-promql/README.md
Normal file

@@ -0,0 +1,43 @@
# lezer-promql

## Overview

This is a PromQL grammar for the [lezer](https://lezer.codemirror.net/) parser system. It is inspired by the initial
grammar coming from [Prometheus](https://github.com/prometheus/prometheus/blob/main/promql/parser/generated_parser.y)
written in yacc.

This library is stable but doesn't provide any guidelines on how to use it, as it has been integrated
into [codemirror-promql](https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql). If you
want to use this library, you probably want to use **@prometheus-io/codemirror-promql** instead.

**Note**: This library is a lezer-based implementation of the [authoritative, goyacc-based PromQL grammar](https://github.com/prometheus/prometheus/blob/main/promql/parser/generated_parser.y).
Any changes to the authoritative grammar need to be reflected in this package as well.

## Installation

This package is available as an npm package:

```bash
npm install --save @prometheus-io/lezer-promql
```

**Note**: you will have to manually install the `lezer` dependencies, as they are peer dependencies of this package.

```bash
npm install --save @lezer/lr @lezer/highlight
```

## Development

### Building

    npm i
    npm run build

### Testing

    npm run test

## License

The code is licensed under an [Apache 2.0](https://github.com/prometheus/prometheus/blob/main/LICENSE) license.
@@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/lezer-promql",
  "version": "0.23.0",
  "version": "0.37.0-rc.0",
  "description": "lezer-based PromQL grammar",
  "main": "index.cjs",
  "type": "module",
14
web/ui/package-lock.json
generated

@@ -28,10 +28,10 @@
    },
    "module/codemirror-promql": {
      "name": "@prometheus-io/codemirror-promql",
      "version": "0.19.0",
      "version": "0.37.0-rc.0",
      "license": "Apache-2.0",
      "dependencies": {
        "@prometheus-io/lezer-promql": "0.23.0",
        "@prometheus-io/lezer-promql": "^0.37.0-rc.0",
        "lru-cache": "^6.0.0"
      },
      "devDependencies": {

@@ -61,7 +61,7 @@
    },
    "module/lezer-promql": {
      "name": "@prometheus-io/lezer-promql",
      "version": "0.23.0",
      "version": "0.37.0-rc.0",
      "license": "Apache-2.0",
      "devDependencies": {
        "@lezer/generator": "^1.0.0",

@@ -17518,7 +17518,7 @@
    },
    "react-app": {
      "name": "@prometheus-io/app",
      "version": "0.1.0",
      "version": "0.37.0-rc.0",
      "dependencies": {
        "@codemirror/autocomplete": "^6.0.0",
        "@codemirror/commands": "^6.0.0",

@@ -17536,7 +17536,7 @@
        "@lezer/lr": "^1.0.0",
        "@nexucis/fuzzy": "^0.4.0",
        "@nexucis/kvsearch": "^0.7.0",
        "@prometheus-io/codemirror-promql": "0.19.0",
        "@prometheus-io/codemirror-promql": "^0.37.0-rc.0",
        "bootstrap": "^4.6.1",
        "css.escape": "^1.5.1",
        "downshift": "^6.1.7",

@@ -19783,7 +19783,7 @@
        "@lezer/lr": "^1.0.0",
        "@nexucis/fuzzy": "^0.4.0",
        "@nexucis/kvsearch": "^0.7.0",
        "@prometheus-io/codemirror-promql": "0.19.0",
        "@prometheus-io/codemirror-promql": "^0.37.0-rc.0",
        "@testing-library/react-hooks": "^7.0.1",
        "@types/enzyme": "^3.10.10",
        "@types/flot": "0.0.32",

@@ -19835,7 +19835,7 @@
        "@lezer/common": "^1.0.0",
        "@lezer/highlight": "^1.0.0",
        "@lezer/lr": "^1.0.0",
        "@prometheus-io/lezer-promql": "0.23.0",
        "@prometheus-io/lezer-promql": "^0.37.0-rc.0",
        "@types/lru-cache": "^5.1.1",
        "isomorphic-fetch": "^3.0.0",
        "lru-cache": "^6.0.0",
@@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/app",
  "version": "0.1.0",
  "version": "0.37.0-rc.0",
  "private": true,
  "dependencies": {
    "@codemirror/autocomplete": "^6.0.0",

@@ -19,7 +19,7 @@
    "@lezer/common": "^1.0.0",
    "@nexucis/fuzzy": "^0.4.0",
    "@nexucis/kvsearch": "^0.7.0",
    "@prometheus-io/codemirror-promql": "0.19.0",
    "@prometheus-io/codemirror-promql": "^0.37.0-rc.0",
    "bootstrap": "^4.6.1",
    "css.escape": "^1.5.1",
    "downshift": "^6.1.7",
@@ -566,6 +566,9 @@ func TestAgentAPIEndPoints(t *testing.T) {
			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			require.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode)
			t.Cleanup(func() {
				require.NoError(t, resp.Body.Close())
			})
		}
	}

@@ -584,6 +587,9 @@ func TestAgentAPIEndPoints(t *testing.T) {
			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			require.Equal(t, http.StatusOK, resp.StatusCode)
			t.Cleanup(func() {
				require.NoError(t, resp.Body.Close())
			})
		}
	}
}
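The added `t.Cleanup` calls ensure each response body is closed even when a later assertion fails the test, since cleanup functions run after the test finishes either way. A standalone sketch of the pattern; the URL and expected status here are illustrative only:

```go
package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestEndpoint registers the body close with t.Cleanup right after the
// request succeeds, so the connection is released even if a later
// require.* call aborts the test body early.
func TestEndpoint(t *testing.T) {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:9090/-/ready", nil)
	require.NoError(t, err)

	resp, err := http.DefaultClient.Do(req)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, resp.Body.Close())
	})

	require.Equal(t, http.StatusOK, resp.StatusCode)
}
```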