Merge branch 'master' into appender-context

Signed-off-by: Annanay <annanayagarwal@gmail.com>
Annanay 2020-07-30 16:43:18 +05:30
commit 9bba8a6eae
94 changed files with 2393 additions and 581 deletions


@ -41,7 +41,7 @@ jobs:
GOOPTS: "-p 2"
GOMAXPROCS: "2"
- prometheus/check_proto:
version: "3.11.4"
version: "3.12.3"
- prometheus/store_artifact:
file: prometheus
- prometheus/store_artifact:


@ -8,7 +8,7 @@
* `prometheus-mixin`: @beorn7
* `storage`
* `remote`: @csmarchbanks, @cstyan, @bwplotka
* `tsdb`: @codesome, @krasi-georgiev
* `tsdb`: @codesome, @krasi-georgiev, @bwplotka
* `web`
* `ui`: @juliusv
* `Makefile` and related build configuration: @simonpasquier, @SuperQ


@ -30,7 +30,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
@ -47,6 +46,7 @@ import (
"github.com/prometheus/common/version"
jcfg "github.com/uber/jaeger-client-go/config"
jprom "github.com/uber/jaeger-lib/metrics/prometheus"
"go.uber.org/atomic"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"k8s.io/klog"
@ -696,7 +696,13 @@ func main() {
return errors.Wrapf(err, "opening storage failed")
}
level.Info(logger).Log("fs_type", prom_runtime.Statfs(cfg.localStoragePath))
switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:
level.Info(logger).Log("fs_type", fsType)
}
level.Info(logger).Log("msg", "TSDB started")
level.Debug(logger).Log("msg", "TSDB options",
"MinBlockDuration", cfg.tsdb.MinBlockDuration,
@ -801,18 +807,18 @@ func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer,
}
type safePromQLNoStepSubqueryInterval struct {
value int64
value atomic.Int64
}
func durationToInt64Millis(d time.Duration) int64 {
return int64(d / time.Millisecond)
}
func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
atomic.StoreInt64(&i.value, durationToInt64Millis(time.Duration(ev)))
i.value.Store(durationToInt64Millis(time.Duration(ev)))
}
func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 {
return atomic.LoadInt64(&i.value)
return i.value.Load()
}
func reloadConfig(filename string, logger log.Logger, noStepSubqueryInterval *safePromQLNoStepSubqueryInterval, rls ...func(*config.Config) error) (err error) {
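
The hunk above is part of a repo-wide migration from sync/atomic calls on bare int64 fields to go.uber.org/atomic's typed wrappers, which make non-atomic access impossible to express and sidestep 64-bit alignment pitfalls on 32-bit platforms. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// safeInterval mirrors safePromQLNoStepSubqueryInterval above: the typed
// atomic.Int64 replaces a bare int64 plus sync/atomic Store/Load calls.
type safeInterval struct {
	value atomic.Int64
}

func (i *safeInterval) Set(d time.Duration) {
	i.value.Store(int64(d / time.Millisecond))
}

func (i *safeInterval) Get() int64 {
	return i.value.Load()
}

func main() {
	var i safeInterval
	i.Set(5 * time.Second)
	fmt.Println(i.Get()) // 5000
}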


@ -363,15 +363,16 @@ func printBlocks(blocks []tsdb.BlockReader, humanReadable bool) {
tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
defer tw.Flush()
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES")
fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tDURATION\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES")
for _, b := range blocks {
meta := b.Meta()
fmt.Fprintf(tw,
"%v\t%v\t%v\t%v\t%v\t%v\n",
"%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
meta.ULID,
getFormatedTime(meta.MinTime, humanReadable),
getFormatedTime(meta.MaxTime, humanReadable),
time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
meta.Stats.NumSamples,
meta.Stats.NumChunks,
meta.Stats.NumSeries,
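
The DURATION column added above derives directly from the block's meta: min and max times are Unix milliseconds, and their difference scaled by time.Millisecond prints as a human-friendly time.Duration. A tiny sketch with made-up timestamps:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Block meta times are Unix milliseconds; the difference, scaled by
	// time.Millisecond, becomes a printable time.Duration.
	minTime := int64(1595889600000) // example values only
	maxTime := int64(1595896800000)
	fmt.Println(time.Duration(maxTime-minTime) * time.Millisecond) // 2h0m0s
}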


@ -18,8 +18,13 @@ import (
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
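
This TestMain hook, repeated across several discovery packages in this commit, turns on goroutine-leak detection: go.uber.org/goleak runs the package's tests and fails the binary if unexpected goroutines are still alive afterwards. The minimal form, plus goleak's per-test variant for contrast:

package example

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain wraps m.Run; after the tests finish, goleak snapshots the
// remaining goroutines and fails the run if any unexpected one is alive.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

// VerifyNone performs the same check inside a single test, useful when
// only some tests in a package are leak-sensitive.
func TestSomething(t *testing.T) {
	defer goleak.VerifyNone(t)
	// ... exercise code that must not leave goroutines behind ...
}
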
func TestMapFromVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"


@ -138,5 +138,10 @@ func (c *ServiceDiscoveryConfig) Validate() error {
return errors.New("empty or null section in static_configs")
}
}
for _, cfg := range c.TritonSDConfigs {
if cfg == nil {
return errors.New("empty or null section in triton_sd_configs")
}
}
return nil
}


@ -0,0 +1,58 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"reflect"
"strings"
"testing"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestForNilSDConfig(t *testing.T) {
// Get all the yaml field names of the ServiceDiscoveryConfig struct.
s := reflect.ValueOf(ServiceDiscoveryConfig{})
configType := s.Type()
n := s.NumField()
fieldsSlice := make([]string, 0, n)
for i := 0; i < n; i++ {
field := configType.Field(i)
tag := field.Tag.Get("yaml")
tag = strings.Split(tag, ",")[0]
fieldsSlice = append(fieldsSlice, tag)
}
// Unmarshal all possible yaml keys and check that validation fails for
// a nil SD config.
for _, f := range fieldsSlice {
if f == "" {
continue
}
t.Run(f, func(t *testing.T) {
c := &ServiceDiscoveryConfig{}
err := yaml.Unmarshal([]byte(fmt.Sprintf(`
---
%s:
-
`, f)), c)
testutil.Ok(t, err)
err = c.Validate()
testutil.NotOk(t, err)
testutil.Equals(t, fmt.Sprintf("empty or null section in %s", f), err.Error())
})
}
}
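
The generated snippets look like "triton_sd_configs:" followed by a lone "-", and the test leans on a YAML decoding detail: a bare "-" list entry decodes to a nil element, which is precisely what the new Validate checks (such as the triton_sd_configs one above) must reject. A standalone sketch with a hypothetical config type:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type conf struct {
	Items []*struct{ Name string } `yaml:"items"`
}

func main() {
	var c conf
	// A bare "-" entry decodes to a nil pointer in the slice.
	if err := yaml.Unmarshal([]byte("items:\n-\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(len(c.Items), c.Items[0] == nil) // 1 true
}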


@ -26,8 +26,13 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestConfiguredService(t *testing.T) {
conf := &SDConfig{
Services: []string{"configuredServiceName"}}
@ -283,10 +288,14 @@ func TestAllServices(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go d.Run(ctx, ch)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
checkOneTarget(t, <-ch)
cancel()
<-ch
}
// Watch only the test service.
@ -319,9 +328,13 @@ func TestAllOptions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go d.Run(ctx, ch)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
cancel()
<-ch
}
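
Both consul tests above replace a fire-and-forget go d.Run(ctx, ch) with a goroutine that closes the channel when Run returns; after cancel(), the final receive blocks until shutdown has actually finished, which is what keeps the new goleak check green. The same shutdown handshake in a self-contained sketch (fakeRun is a hypothetical stand-in for a discoverer's Run):

package main

import (
	"context"
	"fmt"
)

// fakeRun stands in for a discoverer's Run method (hypothetical, for
// illustration): it emits updates until the context is cancelled.
func fakeRun(ctx context.Context, ch chan<- string) {
	for {
		select {
		case <-ctx.Done():
			return
		case ch <- "update":
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan string)
	go func() {
		fakeRun(ctx, ch) // returns once the context is cancelled
		close(ch)        // signals the goroutine has fully exited
	}()
	fmt.Println(<-ch) // consume one update
	cancel()
	for range ch {
		// drain until close(ch); when this loop ends, no goroutine leaks
	}
}
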
func TestGetDatacenterShouldReturnError(t *testing.T) {


@ -35,7 +35,10 @@ import (
const (
resolvConf = "/etc/resolv.conf"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsNameLabel = model.MetaLabelPrefix + "dns_name"
dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_"
dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port"
// Constants for instrumentation.
namespace = "prometheus"
@ -183,9 +186,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
}
for _, record := range response.Answer {
var target model.LabelValue
var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")
@ -199,8 +206,10 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
continue
}
tg.Targets = append(tg.Targets, model.LabelSet{
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
model.AddressLabel: target,
dnsNameLabel: model.LabelValue(name),
dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
dnsSrvRecordPortLabel: dnsSrvRecordPort,
})
}
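
The net effect of the dns.go hunks above: every target now carries __meta_dns_srv_record_target and __meta_dns_srv_record_port, filled for SRV answers and left empty for A/AAAA answers, as the updated test expectations below confirm. A compact sketch of the mapping:

package main

import (
	"fmt"

	"github.com/miekg/dns"
	"github.com/prometheus/common/model"
)

// srvLabels sketches the mapping added above: SRV answers carry their
// target and port in dedicated meta labels; other record types leave
// both labels empty.
func srvLabels(rr dns.RR, qname string) model.LabelSet {
	var target, port model.LabelValue
	if srv, ok := rr.(*dns.SRV); ok {
		target = model.LabelValue(srv.Target)
		port = model.LabelValue(fmt.Sprintf("%d", srv.Port))
	}
	return model.LabelSet{
		"__meta_dns_name":              model.LabelValue(qname),
		"__meta_dns_srv_record_target": target,
		"__meta_dns_srv_record_port":   port,
	}
}

func main() {
	srv := &dns.SRV{Target: "db1.example.com.", Port: 3306}
	fmt.Println(srvLabels(srv, "_mysql._tcp.db.example.com."))
}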


@ -22,6 +22,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/miekg/dns"
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
"github.com/prometheus/common/model"
@ -29,6 +30,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestDNS(t *testing.T) {
testCases := []struct {
name string
@ -70,7 +75,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "192.0.2.2:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "192.0.2.2:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@ -95,7 +105,12 @@ func TestDNS(t *testing.T) {
{
Source: "web.example.com.",
Targets: []model.LabelSet{
{"__address__": "[::1]:80", "__meta_dns_name": "web.example.com."},
{
"__address__": "[::1]:80",
"__meta_dns_name": "web.example.com.",
"__meta_dns_srv_record_target": "",
"__meta_dns_srv_record_port": "",
},
},
},
},
@ -120,8 +135,18 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{"__address__": "db2.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
{
"__address__": "db2.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db2.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},
@ -145,7 +170,12 @@ func TestDNS(t *testing.T) {
{
Source: "_mysql._tcp.db.example.com.",
Targets: []model.LabelSet{
{"__address__": "db1.example.com:3306", "__meta_dns_name": "_mysql._tcp.db.example.com."},
{
"__address__": "db1.example.com:3306",
"__meta_dns_name": "_mysql._tcp.db.example.com.",
"__meta_dns_srv_record_target": "db1.example.com.",
"__meta_dns_srv_record_port": "3306",
},
},
},
},


@ -57,42 +57,68 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
}
for _, s := range services {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
commonLabels := map[string]string{
swarmLabelServiceID: s.ID,
swarmLabelServiceName: s.Spec.Name,
swarmLabelServiceTaskContainerHostname: s.Spec.TaskTemplate.ContainerSpec.Hostname,
swarmLabelServiceTaskContainerImage: s.Spec.TaskTemplate.ContainerSpec.Image,
}
commonLabels[swarmLabelServiceMode] = getServiceValueMode(s)
if s.UpdateStatus != nil {
commonLabels[swarmLabelServiceUpdatingStatus] = string(s.UpdateStatus.State)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
commonLabels[swarmLabelServiceLabelPrefix+ln] = v
}
for _, p := range s.Endpoint.VirtualIPs {
var added bool
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
for _, p := range s.Endpoint.VirtualIPs {
for _, e := range s.Endpoint.Ports {
if e.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelServiceEndpointPortName: model.LabelValue(e.Name),
swarmLabelServiceEndpointPortPublishMode: model.LabelValue(e.PublishMode),
swarmLabelServiceID: model.LabelValue(s.ID),
swarmLabelServiceName: model.LabelValue(s.Spec.Name),
swarmLabelServiceTaskContainerHostname: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Hostname),
swarmLabelServiceTaskContainerImage: model.LabelValue(s.Spec.TaskTemplate.ContainerSpec.Image),
}
labels[swarmLabelServiceMode] = model.LabelValue(getServiceValueMode(s))
if s.UpdateStatus != nil {
labels[swarmLabelServiceUpdatingStatus] = model.LabelValue(s.UpdateStatus.State)
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range s.Spec.Labels {
ln := strutil.SanitizeLabelName(k)
labels[model.LabelName(swarmLabelServiceLabelPrefix+ln)] = model.LabelValue(v)
}
ip, _, err := net.ParseCIDR(p.Addr)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(e.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[p.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}
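
The refactor above hoists the labels shared by all of a service's targets into commonLabels and, more importantly, changes which targets exist: previously a service without a published TCP port produced no target at all; now each virtual IP falls back to a target on the configured discovery port (d.port). That is why the test below expects 15 targets instead of 10. A sketch of the fallback rule (the tasks.go hunk later in this commit applies the same pattern):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// targetsForIP yields one target per published TCP port, or a single
// target on the default discovery port when the service publishes none
// (the "if !added" branch above).
func targetsForIP(ip string, publishedTCPPorts []int, defaultPort int) []string {
	var targets []string
	for _, p := range publishedTCPPorts {
		targets = append(targets, net.JoinHostPort(ip, strconv.Itoa(p)))
	}
	if len(targets) == 0 {
		targets = append(targets, net.JoinHostPort(ip, strconv.Itoa(defaultPort)))
	}
	return targets
}

func main() {
	fmt.Println(targetsForIP("10.0.1.34", nil, 80))         // [10.0.1.34:80]
	fmt.Println(targetsForIP("10.0.0.13", []int{9090}, 80)) // [10.0.0.13:9090]
}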


@ -50,7 +50,7 @@ host: %s
tg := tgs[0]
testutil.Assert(t, tg != nil, "tg should not be nil")
testutil.Assert(t, tg.Targets != nil, "tg.targets should not be nil")
testutil.Equals(t, 10, len(tg.Targets))
testutil.Equals(t, 15, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@ -65,8 +65,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"),
},
@ -83,11 +83,75 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("3qvd7bwfptmoht16t1f7jllb6"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_node-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-node-exporter:v0.16.0@sha256:1d2518ec0501dd848e718bf008df37852b1448c862d6f256f2d39244975758d6"),
},
{
"__address__": model.LabelValue("10.0.1.34:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("cloudflare/unsee:v0.8.0@sha256:28398f47f63feb1647887999701fa730da351fc6d3cc7228e5da44b40a663ada"),
},
{
"__address__": model.LabelValue("10.0.1.13:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0@sha256:f1a3781c4785637ba088dcf54889f77d9b6b69f21b2c0a167c1347473f4e2587"),
},
{
"__address__": model.LabelValue("10.0.1.23:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("google/cadvisor:latest@sha256:815386ebbe9a3490f38785ab11bda34ec8dacf4634af77b8912832d4f85dca04"),
},
{
"__address__": model.LabelValue("10.0.1.31:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("rl4jhdws3r4lkfvq8kx6c4xnr"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_alertmanager"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0@sha256:7069b656bd5df0606ff1db81a7553a25dc72be51d3aca6a7e08d776566cefbd8"),
},
{
"__address__": model.LabelValue("10.0.0.13:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@ -100,8 +164,42 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@ -118,25 +216,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@ -153,25 +234,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
{
"__address__": model.LabelValue("10.0.0.13:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_endpoint_port_name": model.LabelValue(""),
"__meta_dockerswarm_service_endpoint_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@ -188,8 +252,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
@ -205,8 +269,8 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"),
},
@ -223,11 +287,27 @@ host: %s
"__meta_dockerswarm_service_id": model.LabelValue("uk9su5tb9ykfzew3qtmp14uzh"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_grafana"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/swarmprom-grafana:5.3.4@sha256:2aca8aa5716e6e0eed3fcdc88fec256a0a1828c491a8cf240486ae7cc473092d"),
},
{
"__address__": model.LabelValue("10.0.1.17:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_service_task_container_hostname": model.LabelValue(""),
"__meta_dockerswarm_service_task_container_image": model.LabelValue("stefanprodan/caddy:latest@sha256:44541cfacb66f4799f81f17fcfb3cb757ccc8f327592745549f5930c42d115c9"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, lbls, tg.Targets[i])


@ -106,12 +106,19 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
tg.Targets = append(tg.Targets, labels)
}
for _, p := range servicePorts[s.ServiceID] {
if p.Protocol != swarm.PortConfigProtocolTCP {
continue
}
for _, network := range s.NetworksAttachments {
for _, address := range network.Addresses {
for _, network := range s.NetworksAttachments {
for _, address := range network.Addresses {
var added bool
ip, _, err := net.ParseCIDR(address)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", address, err)
}
for _, p := range servicePorts[s.ServiceID] {
if p.Protocol != swarm.PortConfigProtocolTCP {
continue
}
labels := model.LabelSet{
swarmLabelTaskPortMode: model.LabelValue(p.PublishMode),
}
@ -124,13 +131,26 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
ip, _, err := net.ParseCIDR(address)
if err != nil {
return nil, fmt.Errorf("error while parsing address %s: %w", address, err)
}
addr := net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(p.PublishedPort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
labels := model.LabelSet{}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[network.Network.ID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}


@ -50,7 +50,7 @@ host: %s
tg := tgs[0]
testutil.Assert(t, tg != nil, "tg should not be nil")
testutil.Assert(t, tg.Targets != nil, "tg.targets should not be nil")
testutil.Equals(t, 17, len(tg.Targets))
testutil.Equals(t, 27, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
@ -108,6 +108,33 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.88:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue(""),
"__meta_dockerswarm_task_desired_state": model.LabelValue("ready"),
"__meta_dockerswarm_task_id": model.LabelValue("7ogolpkgw2d2amnht1fbtm9oq"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("rejected"),
},
{
"__address__": model.LabelValue("10.0.0.12:9100"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@ -273,6 +300,168 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.35:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"),
"__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("9bbq7j55tzzz85k2gg52x73rg"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("cloudflare/unsee:v0.8.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_unsee"),
"__meta_dockerswarm_task_container_id": model.LabelValue("a0734f9421e710b654ca9e67010cbb55c1d5f92c17cc9e6590ab61130a3587e0"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("i9skiw2n5jkjoq7gix2t9uzhy"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.14:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("hv645udwaaewyw7wuc6nspm68"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-prometheus:v2.5.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_prometheus"),
"__meta_dockerswarm_task_container_id": model.LabelValue("f35e071f1251951a09566a2231cb318a1a29e158a3c3d15ad68fd937184c56b6"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("irj35fr4j59f1kvi0xdin9oly"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.20:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("3cee48a5b2b363576b255207aae2d1bd0d8872aa61c3ff3b6d180d78d672a943"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("k8x2806pevvxigagsxovjmur1"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.19:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"),
"__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("d5fd335a33c1fca8fffa1ec17bb5010a6bb7fba7aa72fd2209c25be8f5d1b975"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("mita4y262u04g4c7zz7ns032a"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.18:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.4"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-1"),
"__meta_dockerswarm_node_id": model.LabelValue("jkc2xd7p3xdazf0u6sh1m9dmr"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("ca72094e1ec0547e84aba227dd44446588172fa9ee273de9bf4e6040ff6937bd"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("nm3v4jifzhfkw3rz33gmnaq2z"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.75:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.5"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("worker-2"),
"__meta_dockerswarm_node_id": model.LabelValue("ldawcom10uqi6owysgi28n4ve"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("worker"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue(""),
"__meta_dockerswarm_task_desired_state": model.LabelValue("shutdown"),
"__meta_dockerswarm_task_id": model.LabelValue("pxdv57mpvquv2zbt7faz3xux9"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("rejected"),
},
{
"__address__": model.LabelValue("10.0.0.19:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@ -300,6 +489,60 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9090"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@ -328,33 +571,6 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9093"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@ -383,33 +599,6 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.0.19:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
"__meta_dockerswarm_network_ingress": model.LabelValue("true"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_name": model.LabelValue("ingress"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.2"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-2"),
"__meta_dockerswarm_node_id": model.LabelValue("i9woemzxymn1n98o9ufebclgm"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("tkv91uso46cck763aaqcb4rek"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_caddy"),
"__meta_dockerswarm_task_container_id": model.LabelValue("5d00e145527f14d1bb2b13601000f1de9c95fc8ec1ccc1ff8291cadc31232957"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("qtr16op0cz5pdbx4rylm7up4g"),
"__meta_dockerswarm_task_port_publish_mode": model.LabelValue("ingress"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.81:9094"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
@ -438,6 +627,60 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("starting"),
},
{
"__address__": model.LabelValue("10.0.1.24:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.1"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("oxygen"),
"__meta_dockerswarm_node_id": model.LabelValue("d3cw2msquo0d71yn42qrnb0tu"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("iml8vdd2dudvq457sbirpf66z"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("google/cadvisor"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_cadvisor"),
"__meta_dockerswarm_task_container_id": model.LabelValue("81dd017b6d9f36d779a3217021111ffc77dcdbd5f26da86c2ae8fada86f33d17"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("s0rh3k9l7ii9vb62lsfp4wc93"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.32:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-3"),
"__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("rl4jhdws3r4lkfvq8kx6c4xnr"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/swarmprom-alertmanager:v0.14.0"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("replicated"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_alertmanager"),
"__meta_dockerswarm_task_container_id": model.LabelValue("2c753737f2ac7b01b577324513d9bd2a1754e65ab2a44714090eb5d83d2961b2"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("sxwkumr37wjaqzir89uepm4nc"),
"__meta_dockerswarm_task_slot": model.LabelValue("1"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.0.16:3000"),
"__meta_dockerswarm_network_id": model.LabelValue("qvwhwd6p61k4o0ulsknqb066z"),
@ -515,6 +758,33 @@ host: %s
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
{
"__address__": model.LabelValue("10.0.1.22:80"),
"__meta_dockerswarm_network_id": model.LabelValue("npq2closzy836m07eaq1425k3"),
"__meta_dockerswarm_network_ingress": model.LabelValue("false"),
"__meta_dockerswarm_network_internal": model.LabelValue("false"),
"__meta_dockerswarm_network_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_network_name": model.LabelValue("mon_net"),
"__meta_dockerswarm_network_scope": model.LabelValue("swarm"),
"__meta_dockerswarm_node_address": model.LabelValue("10.0.232.3"),
"__meta_dockerswarm_node_availability": model.LabelValue("active"),
"__meta_dockerswarm_node_hostname": model.LabelValue("master-3"),
"__meta_dockerswarm_node_id": model.LabelValue("bvtjl7pnrtg0k88ywialsldpd"),
"__meta_dockerswarm_node_platform_architecture": model.LabelValue("x86_64"),
"__meta_dockerswarm_node_platform_os": model.LabelValue("linux"),
"__meta_dockerswarm_node_role": model.LabelValue("manager"),
"__meta_dockerswarm_node_status": model.LabelValue("ready"),
"__meta_dockerswarm_service_id": model.LabelValue("ul5qawv4s7f7msm7dtyfarw80"),
"__meta_dockerswarm_service_label_com_docker_stack_image": model.LabelValue("stefanprodan/caddy"),
"__meta_dockerswarm_service_label_com_docker_stack_namespace": model.LabelValue("mon"),
"__meta_dockerswarm_service_mode": model.LabelValue("global"),
"__meta_dockerswarm_service_name": model.LabelValue("mon_dockerd-exporter"),
"__meta_dockerswarm_task_container_id": model.LabelValue("dd50c428ebc7a7bd786c7b0db873bf53ed5cbe5312995a76eb8fd325dcf16032"),
"__meta_dockerswarm_task_desired_state": model.LabelValue("running"),
"__meta_dockerswarm_task_id": model.LabelValue("zhoo9feqxfyi9zg6acnt1eam2"),
"__meta_dockerswarm_task_slot": model.LabelValue("0"),
"__meta_dockerswarm_task_state": model.LabelValue("running"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, lbls, tg.Targets[i])

View file

@ -26,11 +26,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
const defaultWait = time.Second
type testRunner struct {

View file

@ -29,6 +29,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
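For context on the leak checks added across these test files: goleak fails the test binary when goroutines are still running at exit, and `TolerantVerifyLeak` presumably wraps `VerifyTestMain` with an ignore list for known benign goroutines. A hedged sketch — the ignored function below is an illustrative placeholder, not necessarily the actual list Prometheus uses:

```go
package mypkg_test // hypothetical test package

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain with an ignore list: goroutines whose top frame matches an
// ignored function are not reported as leaks.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), // assumed benign background goroutine
	)
}
```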
// makeDiscovery creates a kubernetes.Discovery instance for testing.
func makeDiscovery(role Role, nsDiscovery NamespaceDiscovery, objects ...runtime.Object) (*Discovery, kubernetes.Interface) {
clientset := fake.NewSimpleClientset(objects...)

View file

@ -25,7 +25,7 @@ import (
"time"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus/testutil"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
common_config "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
@ -33,9 +33,14 @@ import (
"github.com/prometheus/prometheus/discovery/consul"
"github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
"gopkg.in/yaml.v2"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
// TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) {
@ -984,7 +989,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
failedCount := testutil.ToFloat64(failedConfigs)
failedCount := client_testutil.ToFloat64(failedConfigs)
if failedCount != 3 {
t.Fatalf("Expected to have 3 failed configs, got: %v", failedCount)
}
@ -1004,7 +1009,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
discoveryManager.ApplyConfig(c)
<-discoveryManager.SyncCh()
failedCount = testutil.ToFloat64(failedConfigs)
failedCount = client_testutil.ToFloat64(failedConfigs)
if failedCount != 0 {
t.Fatalf("Expected to get no failed config, got: %v", failedCount)
}

View file

@ -20,11 +20,16 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestRefresh(t *testing.T) {
tg1 := []*targetgroup.Group{
{

View file

@ -18,8 +18,13 @@ import (
"time"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestNewDiscoveryError(t *testing.T) {
_, err := NewDiscovery(
[]string{"unreachable.test"},

View file

@ -459,7 +459,10 @@ One of the following roles can be configured to discover targets:
#### `services`
The `services` role is used to discover [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a service, a
single target is generated. If a service has no published ports, a target per
service is created using the `port` parameter defined in the SD configuration.
Available meta labels:
@ -481,7 +484,10 @@ Available meta labels:
#### `tasks`
The `tasks` role is used to discover [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks).
The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks)
and exposes their ports as targets. For each published port of a task, a single
target is generated. If a task has no published ports, a target per task is
created using the `port` parameter defined in the SD configuration.
Available meta labels:
@ -552,7 +558,8 @@ tls_config:
# Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`.
role: <string>
# The port to scrape metrics from, when `role` is nodes.
# The port to scrape metrics from, when `role` is nodes, and for discovered
# tasks and services that don't have published ports.
[ port: <int> | default = 80 ]
# The time after which the swarm objects are refreshed.
@ -589,9 +596,11 @@ This service discovery method only supports basic DNS A, AAAA and SRV record
queries, but not the advanced DNS-SD approach specified in
[RFC6763](https://tools.ietf.org/html/rfc6763).
During the [relabeling phase](#relabel_config), the meta label
`__meta_dns_name` is available on each target and is set to the
record name that produced the discovered target.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_dns_name`: the record name that produced the discovered target.
* `__meta_dns_srv_record_target`: the target field of the SRV record.
* `__meta_dns_srv_record_port`: the port field of the SRV record.
```yaml
# A list of DNS domain names to be queried.

View file

@ -55,10 +55,23 @@ Example:
### Float literals
Scalar float values can be literally written as numbers of the form
`[-](digits)[.(digits)]`.
Scalar float values can be written as literal integer or floating-point numbers in the following format (whitespace is included only for readability):

```
[-+]?(
      [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?
    | 0[xX][0-9a-fA-F]+
    | [nN][aA][nN]
    | [iI][nN][fF]
)
```

Examples:

```
23
-2.43
3.4e-9
0x8f
-Inf
NaN
```
## Time series Selectors
@ -180,7 +193,7 @@ The same works for range vectors. This returns the 5-minute rate that
## Subquery
Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector.
Syntax: `<instant_query> '[' <range> ':' [<resolution>] ']' [ offset <duration> ]`

View file

@ -139,7 +139,7 @@ to the nearest integer.
## `histogram_quantile()`
`histogram_quantile(φ float, b instant-vector)` calculates the φ-quantile (0 ≤ φ
`histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ φ
≤ 1) from the buckets `b` of a
[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). (See
[histograms and summaries](https://prometheus.io/docs/practices/histograms) for

View file
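As a companion to the `histogram_quantile()` signature fix above, a minimal sketch (not the engine's actual implementation) of the linear interpolation the function performs inside the target bucket:

```go
// A sketch of histogram_quantile's bucket interpolation; the real
// implementation also handles the +Inf bucket, NaN, and unsorted input.
package main

import "fmt"

type bucket struct {
	upperBound float64 // the `le` label
	count      float64 // cumulative observation count
}

// quantile assumes q in (0, 1] and at least one observation.
func quantile(q float64, buckets []bucket) float64 {
	rank := q * buckets[len(buckets)-1].count
	for i, b := range buckets {
		if b.count >= rank {
			lower, prevCount := 0.0, 0.0
			if i > 0 {
				lower = buckets[i-1].upperBound
				prevCount = buckets[i-1].count
			}
			// Linear interpolation: observations are assumed to be
			// uniformly distributed within the bucket.
			return lower + (b.upperBound-lower)*(rank-prevCount)/(b.count-prevCount)
		}
	}
	return buckets[len(buckets)-1].upperBound
}

func main() {
	b := []bucket{{0.1, 10}, {0.5, 70}, {1, 100}}
	fmt.Println(quantile(0.9, b)) // ≈0.83, inside the (0.5, 1] bucket
}
```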

@ -73,7 +73,9 @@ needed_disk_space = retention_time_seconds * ingested_samples_per_second * bytes
To tune the rate of ingested samples per second, you can either reduce the number of time series you scrape (fewer targets or fewer series per target), or you can increase the scrape interval. However, reducing the number of series is likely more effective, due to compression of samples within a series.
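A quick worked instance of the `needed_disk_space` formula from the hunk context above; the inputs are illustrative placeholders, not recommendations:

```go
package main

import "fmt"

func main() {
	// Illustrative inputs: 15 days retention, 100k samples/s,
	// ~1.3 bytes/sample (compressed samples are typically 1-2 bytes).
	retentionTimeSeconds := 15 * 24 * 3600.0
	ingestedSamplesPerSecond := 100_000.0
	bytesPerSample := 1.3

	neededDiskSpace := retentionTimeSeconds * ingestedSamplesPerSecond * bytesPerSample
	fmt.Printf("~%.0f GiB\n", neededDiskSpace/(1<<30)) // ~157 GiB
}
```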
If your local storage becomes corrupted for whatever reason, your best bet is to shut down Prometheus and remove the entire storage directory. Non POSIX compliant filesystems are not supported by Prometheus's local storage, corruptions may happen, without possibility to recover. NFS is only potentially POSIX, most implementations are not. You can try removing individual block directories to resolve the problem, this means losing a time window of around two hours worth of data per block directory. Again, Prometheus's local storage is not meant as durable long-term storage.
If your local storage becomes corrupted for whatever reason, your best bet is to shut down Prometheus and remove the entire storage directory. You can also try removing individual block directories, or the WAL directory, to resolve the problem; this means losing a time window of around two hours' worth of data per block directory. Again, Prometheus's local storage is not meant as durable long-term storage.
CAUTION: Non-POSIX compliant filesystems are not supported by Prometheus' local storage as unrecoverable corruptions may happen. NFS filesystems (including AWS's EFS) are not supported. NFS could be POSIX-compliant, but most implementations are not. It is strongly recommended to use a local filesystem for reliability.
If both time and size retention policies are specified, whichever policy triggers first will be used at that instant.

View file

@ -70,6 +70,13 @@ scrape_configs:
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
@ -78,12 +85,6 @@ scrape_configs:
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# Scrape config for Kubelet cAdvisor.
#
@ -93,9 +94,9 @@ scrape_configs:
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
# the --cadvisor-port=0 Kubelet flag).
# HTTP endpoint; in that case, use the "/metrics" endpoint on port 4194 of
# the nodes (and ensure cAdvisor's HTTP server hasn't been disabled with the
# --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
@ -105,6 +106,12 @@ scrape_configs:
# `http`.
scheme: https
# Starting Kubernetes 1.7.3 the cAdvisor metrics are under /metrics/cadvisor.
# Kubernetes CIS Benchmark recommends against enabling the insecure HTTP
# servers of Kubernetes, therefore the cAdvisor metrics on the secure handler
# are used.
metrics_path: /metrics/cadvisor
# This TLS & bearer token file config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
@ -113,6 +120,13 @@ scrape_configs:
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
@ -121,12 +135,6 @@ scrape_configs:
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# Example scrape config for service endpoints.
#

View file

@ -1,4 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
# To have Prometheus retrieve metrics from Kubelets with authentication and
# authorization enabled (which is highly recommended and included in security
# benchmarks) the following flags must be set on the kubelet(s):
#
# --authentication-token-webhook
# --authorization-mode=Webhook
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
@ -6,7 +13,7 @@ rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- nodes/metrics
- services
- endpoints
- pods
@ -16,7 +23,7 @@ rules:
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
verbs: ["get"]
---
apiVersion: v1
@ -25,7 +32,7 @@ metadata:
name: prometheus
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus

go.mod (55 lines changed)
View file

@ -1,9 +1,9 @@
module github.com/prometheus/prometheus
go 1.13
go 1.14
require (
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible
github.com/Azure/azure-sdk-for-go v44.2.0+incompatible
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest/autorest v0.11.2
github.com/Azure/go-autorest/autorest/adal v0.9.0
@ -12,12 +12,12 @@ require (
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d
github.com/armon/go-metrics v0.3.3 // indirect
github.com/aws/aws-sdk-go v1.33.5
github.com/aws/aws-sdk-go v1.33.12
github.com/cespare/xxhash v1.1.0
github.com/containerd/containerd v1.3.4 // indirect
github.com/davecgh/go-spew v1.1.1
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b
github.com/digitalocean/godo v1.38.0
github.com/digitalocean/godo v1.42.0
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible
github.com/docker/go-connections v0.4.0 // indirect
@ -56,35 +56,64 @@ require (
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.10.0
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
github.com/soheilhy/cmux v0.1.4
github.com/uber/jaeger-client-go v2.24.0+incompatible
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible
go.mongodb.org/mongo-driver v1.3.2 // indirect
go.uber.org/atomic v1.6.0 // indirect
go.uber.org/atomic v1.6.0
go.uber.org/goleak v1.0.0
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1
golang.org/x/tools v0.0.0-20200725200936-102e7d357031
google.golang.org/api v0.29.0
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e
google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7
google.golang.org/grpc v1.29.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/fsnotify/fsnotify.v1 v1.4.7
gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.18.5
k8s.io/apimachinery v0.18.5
k8s.io/client-go v0.18.5
k8s.io/api v0.18.6
k8s.io/apimachinery v0.18.6
k8s.io/client-go v0.18.6
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 // indirect
)
replace k8s.io/klog => github.com/simonpasquier/klog-gokit v0.1.0
exclude (
// Exclude grpc v1.30.0 because of breaking changes. See #7621.
google.golang.org/grpc v1.30.0
// Exclude pre-go-mod kubernetes tags, as they are older
// than v0.x releases but are picked when we update the dependencies.
k8s.io/client-go v1.4.0
k8s.io/client-go v1.4.0+incompatible
k8s.io/client-go v1.5.0
k8s.io/client-go v1.5.0+incompatible
k8s.io/client-go v1.5.1
k8s.io/client-go v1.5.1+incompatible
k8s.io/client-go v10.0.0+incompatible
k8s.io/client-go v11.0.0+incompatible
k8s.io/client-go v2.0.0+incompatible
k8s.io/client-go v2.0.0-alpha.1+incompatible
k8s.io/client-go v3.0.0+incompatible
k8s.io/client-go v3.0.0-beta.0+incompatible
k8s.io/client-go v4.0.0+incompatible
k8s.io/client-go v4.0.0-beta.0+incompatible
k8s.io/client-go v5.0.0+incompatible
k8s.io/client-go v5.0.1+incompatible
k8s.io/client-go v6.0.0+incompatible
k8s.io/client-go v7.0.0+incompatible
k8s.io/client-go v8.0.0+incompatible
k8s.io/client-go v9.0.0+incompatible
k8s.io/client-go v9.0.0-invalid+incompatible
)

go.sum (48 lines changed)
View file

@ -26,8 +26,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg=
github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v44.2.0+incompatible h1:d0WY8HTXhnurVBAkLXzv4bRpd+P5r3U/W17Z88PJWiI=
github.com/Azure/azure-sdk-for-go v44.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@ -104,8 +104,8 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4=
github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -151,8 +151,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b h1:Yqiad0+sloMPdd/0Fg22actpFx0dekpzt1xJmVNVkU0=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/digitalocean/godo v1.38.0 h1:to+pLe5RJqflJiyxhaLJfJgT3YzwHRSg19mOWkKt6A0=
github.com/digitalocean/godo v1.38.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/digitalocean/godo v1.42.0 h1:xQlEFLhQ1zZUryJAfiWb8meLPPCWnLO901U5Imhh0Mc=
github.com/digitalocean/godo v1.42.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk=
@ -384,6 +384,7 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@ -614,6 +615,7 @@ github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChl
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@ -672,6 +674,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@ -713,6 +717,7 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@ -725,8 +730,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo=
github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -927,8 +932,8 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c=
golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -993,9 +998,10 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97 h1:DAuln/hGp+aJiHpID1Y1hYzMEPP5WLwtZHPb50mN0OE=
golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200725200936-102e7d357031 h1:VtIxiVHWPhnny2ZTi4f9/2diZKqyLaq3FUTuud5+khA=
golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
@ -1048,10 +1054,11 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4=
google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1074,6 +1081,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
@ -1118,12 +1126,12 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM=
k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk=
k8s.io/apimachinery v0.18.5 h1:Lh6tgsM9FMkC12K5T5QjRm7rDs6aQN5JHkA0JomULDM=
k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.18.5 h1:cLhGZdOmyPhwtt20Lrb7uAqxxB1uvY+NTmNJvno1oKA=
k8s.io/client-go v0.18.5/go.mod h1:EsiD+7Fx+bRckKWZXnAXRKKetm1WuzPagH4iOSC8x58=
k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=

View file

@ -26,19 +26,19 @@ import (
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/go-openapi/strfmt"
"github.com/pkg/errors"
"go.uber.org/atomic"
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels"
@ -466,7 +466,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
var (
wg sync.WaitGroup
numSuccess uint64
numSuccess atomic.Uint64
)
for _, ams := range amSets {
var (
@ -527,7 +527,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
level.Error(n.logger).Log("alertmanager", url, "count", len(alerts), "msg", "Error sending alert", "err", err)
n.metrics.errors.WithLabelValues(url).Inc()
} else {
atomic.AddUint64(&numSuccess, 1)
numSuccess.Inc()
}
n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds())
n.metrics.sent.WithLabelValues(url).Add(float64(len(alerts)))
@ -541,7 +541,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
wg.Wait()
return numSuccess > 0
return numSuccess.Load() > 0
}
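The migration pattern in this file (and throughout this commit): raw integers accessed through `sync/atomic` become `go.uber.org/atomic` typed values, which remove the 64-bit alignment caveat and make non-atomic access impossible. A standalone sketch of the counter usage:

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var (
		wg         sync.WaitGroup
		numSuccess atomic.Uint64 // zero value is ready to use; no alignment caveats
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			numSuccess.Inc() // replaces atomic.AddUint64(&numSuccess, 1)
		}()
	}
	wg.Wait()
	fmt.Println(numSuccess.Load() > 0) // replaces atomic.LoadUint64(&numSuccess) > 0
}
```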
func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts {

View file

@ -22,7 +22,6 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"sync/atomic"
"testing"
"time"
@ -30,6 +29,7 @@ import (
"github.com/prometheus/alertmanager/api/v2/models"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
@ -102,10 +102,12 @@ func TestHandlerSendAll(t *testing.T) {
var (
errc = make(chan error, 1)
expected = make([]*Alert, 0, maxBatchSize)
status1, status2 = int32(http.StatusOK), int32(http.StatusOK)
status1, status2 atomic.Int32
)
status1.Store(int32(http.StatusOK))
status2.Store(int32(http.StatusOK))
newHTTPServer := func(u, p string, status *int32) *httptest.Server {
newHTTPServer := func(u, p string, status *atomic.Int32) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var err error
defer func() {
@ -128,7 +130,7 @@ func TestHandlerSendAll(t *testing.T) {
if err == nil {
err = alertsEqual(expected, alerts)
}
w.WriteHeader(int(atomic.LoadInt32(status)))
w.WriteHeader(int(status.Load()))
}))
}
server1 := newHTTPServer("prometheus", "testing_password", &status1)
@ -194,11 +196,11 @@ func TestHandlerSendAll(t *testing.T) {
testutil.Assert(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
checkNoErr()
atomic.StoreInt32(&status1, int32(http.StatusNotFound))
status1.Store(int32(http.StatusNotFound))
testutil.Assert(t, h.sendAll(h.queue...), "all sends failed unexpectedly")
checkNoErr()
atomic.StoreInt32(&status2, int32(http.StatusInternalServerError))
status2.Store(int32(http.StatusInternalServerError))
testutil.Assert(t, !h.sendAll(h.queue...), "all sends succeeded unexpectedly")
checkNoErr()
}

View file

@ -60,10 +60,10 @@ type engineMetrics struct {
maxConcurrentQueries prometheus.Gauge
queryLogEnabled prometheus.Gauge
queryLogFailures prometheus.Counter
queryQueueTime prometheus.Summary
queryPrepareTime prometheus.Summary
queryInnerEval prometheus.Summary
queryResultSort prometheus.Summary
queryQueueTime prometheus.Observer
queryPrepareTime prometheus.Observer
queryInnerEval prometheus.Observer
queryResultSort prometheus.Observer
}
// convertibleToInt64 returns true if v does not over-/underflow an int64.
@ -230,6 +230,16 @@ func NewEngine(opts EngineOpts) *Engine {
opts.Logger = log.NewNopLogger()
}
queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"slice"},
)
metrics := &engineMetrics{
currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
@ -255,38 +265,10 @@ func NewEngine(opts EngineOpts) *Engine {
Name: "queries_concurrent_max",
Help: "The max number of concurrent queries.",
}),
queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "queue_time"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "prepare_time"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "inner_eval"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "query_duration_seconds",
Help: "Query timings",
ConstLabels: prometheus.Labels{"slice": "result_sort"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
queryQueueTime: queryResultSummary.WithLabelValues("queue_time"),
queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"),
queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"),
queryResultSort: queryResultSummary.WithLabelValues("result_sort"),
}
if t := opts.ActiveQueryTracker; t != nil {
@ -308,10 +290,7 @@ func NewEngine(opts EngineOpts) *Engine {
metrics.maxConcurrentQueries,
metrics.queryLogEnabled,
metrics.queryLogFailures,
metrics.queryQueueTime,
metrics.queryPrepareTime,
metrics.queryInnerEval,
metrics.queryResultSort,
queryResultSummary,
)
}
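The engine change above collapses four near-identical summaries into one `SummaryVec` and stores curried `prometheus.Observer`s per slice, so a single registration covers all four. A compact sketch of the same pattern:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	durations := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Namespace:  "prometheus",
		Subsystem:  "engine",
		Name:       "query_duration_seconds",
		Help:       "Query timings",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	}, []string{"slice"})

	// One Collector registration covers every slice value.
	prometheus.MustRegister(durations)

	// Curried observers, as stored in engineMetrics above.
	queueTime := durations.WithLabelValues("queue_time")
	queueTime.Observe(0.01)
}
```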

View file

@ -28,8 +28,13 @@ import (
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/testutil"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestQueryConcurrency(t *testing.T) {
maxConcurrency := 10

View file

@ -26,7 +26,7 @@ import (
// Node is a generic interface for all nodes in an AST.
//
// Whenever numerous nodes are listed such as in a switch-case statement
// or a chain of function definitions (e.g. String(), expr(), etc.) convention is
// or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) the convention is
// to list them as follows:
//
// - Statements
@ -49,9 +49,9 @@ type Node interface {
type Statement interface {
Node
// stmt ensures that no other type accidentally implements the interface
// PromQLStmt ensures that no other type accidentally implements the interface
// nolint:unused
stmt()
PromQLStmt()
}
// EvalStmt holds an expression and information on the range it should
@ -66,7 +66,7 @@ type EvalStmt struct {
Interval time.Duration
}
func (*EvalStmt) stmt() {}
func (*EvalStmt) PromQLStmt() {}
// Expr is a generic interface for all expression types.
type Expr interface {
@ -75,8 +75,8 @@ type Expr interface {
// Type returns the type the expression evaluates to. It does not perform
// in-depth checks as this is done at parsing-time.
Type() ValueType
// expr ensures that no other types accidentally implement the interface.
expr()
// PromQLExpr ensures that no other types accidentally implement the interface.
PromQLExpr()
}
// Expressions is a list of expression nodes that implements Node.
@ -180,7 +180,7 @@ type VectorSelector struct {
type TestStmt func(context.Context) error
func (TestStmt) String() string { return "test statement" }
func (TestStmt) stmt() {}
func (TestStmt) PromQLStmt() {}
func (TestStmt) PositionRange() PositionRange {
return PositionRange{
@ -204,16 +204,16 @@ func (e *BinaryExpr) Type() ValueType {
return ValueTypeVector
}
func (*AggregateExpr) expr() {}
func (*BinaryExpr) expr() {}
func (*Call) expr() {}
func (*MatrixSelector) expr() {}
func (*SubqueryExpr) expr() {}
func (*NumberLiteral) expr() {}
func (*ParenExpr) expr() {}
func (*StringLiteral) expr() {}
func (*UnaryExpr) expr() {}
func (*VectorSelector) expr() {}
func (*AggregateExpr) PromQLExpr() {}
func (*BinaryExpr) PromQLExpr() {}
func (*Call) PromQLExpr() {}
func (*MatrixSelector) PromQLExpr() {}
func (*SubqueryExpr) PromQLExpr() {}
func (*NumberLiteral) PromQLExpr() {}
func (*ParenExpr) PromQLExpr() {}
func (*StringLiteral) PromQLExpr() {}
func (*UnaryExpr) PromQLExpr() {}
func (*VectorSelector) PromQLExpr() {}
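The renamed marker methods are now exported, so packages outside the parser can implement `Statement` and `Expr`, while the marker still prevents accidental implementations. A minimal sketch of the pattern (the node type here is hypothetical):

```go
package main

import "fmt"

// Expr mirrors the parser's marker-method pattern: an implementation must
// deliberately declare PromQLExpr, so nothing satisfies it by accident.
type Expr interface {
	String() string
	PromQLExpr() // marker method
}

type NumberLiteral struct{ Val float64 }

func (n *NumberLiteral) String() string { return fmt.Sprintf("%g", n.Val) }
func (n *NumberLiteral) PromQLExpr()    {}

func main() {
	var e Expr = &NumberLiteral{Val: 3.14}
	fmt.Println(e)
}
```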
// VectorMatchCardinality describes the cardinality relationship
// of two Vectors in a binary operation.

View file

@ -89,6 +89,15 @@ func TestExprString(t *testing.T) {
{
in: `{__name__="a"}`,
},
{
in: `a{b!="c"}[1m]`,
},
{
in: `a{b=~"c"}[1m]`,
},
{
in: `a{b!~"c"}[1m]`,
},
}
for _, test := range inputs {

View file

@ -26,6 +26,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"go.uber.org/goleak"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/pkg/labels"
@ -39,6 +40,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
goleak.VerifyTestMain(m)
}
func TestAlertingRule(t *testing.T) {
suite, err := promql.NewTest(t, `
load 5m

View file

@ -46,6 +46,10 @@ import (
"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
func TestNewScrapePool(t *testing.T) {
var (
app = &nopAppendable{}
@ -1785,6 +1789,7 @@ func TestReuseScrapeCache(t *testing.T) {
}
proxyURL, _ = url.Parse("http://localhost:2128")
)
defer sp.stop()
sp.sync([]*Target{t1})
steps := []struct {
@ -1937,6 +1942,7 @@ func TestReuseCacheRace(t *testing.T) {
},
}
)
defer sp.stop()
sp.sync([]*Target{t1})
start := time.Now()

View file

@ -10,8 +10,8 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255
fi
if ! [[ $(protoc --version) =~ "3.11.4" ]]; then
echo "could not find protoc 3.11.4, is it installed + in PATH?"
if ! [[ $(protoc --version) =~ "3.12.3" ]]; then
echo "could not find protoc 3.12.3, is it installed + in PATH?"
exit 255
fi
@ -25,9 +25,9 @@ done
PROM_ROOT="${PWD}"
PROM_PATH="${PROM_ROOT}/prompb"
GOGOPROTO_ROOT="$(GO111MODULE=on go list -f '{{ .Dir }}' -m github.com/gogo/protobuf)"
GOGOPROTO_ROOT="$(GO111MODULE=on go list -mod=readonly -f '{{ .Dir }}' -m github.com/gogo/protobuf)"
GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf"
GRPC_GATEWAY_ROOT="$(GO111MODULE=on go list -f '{{ .Dir }}' -m github.com/grpc-ecosystem/grpc-gateway)"
GRPC_GATEWAY_ROOT="$(GO111MODULE=on go list -mod=readonly -f '{{ .Dir }}' -m github.com/grpc-ecosystem/grpc-gateway)"
DIRS="prompb"

View file

@ -140,7 +140,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
}, nil
}
type recoverableError struct {
type RecoverableError struct {
error
}
@ -177,7 +177,7 @@ func (c *client) Store(ctx context.Context, req []byte) error {
if err != nil {
// Errors from client.Do are from (for example) network errors, so are
// recoverable.
return recoverableError{err}
return RecoverableError{err}
}
defer func() {
io.Copy(ioutil.Discard, httpResp.Body)
@ -193,7 +193,7 @@ func (c *client) Store(ctx context.Context, req []byte) error {
err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
}
if httpResp.StatusCode/100 == 5 {
return recoverableError{err}
return RecoverableError{err}
}
return err
}
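Exporting `RecoverableError` lets callers outside the package branch on retryability with a type assertion. A sketch of the retry-loop shape — the `store` helper is a stand-in, and the backoff is deliberately crude:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// RecoverableError mirrors the exported wrapper above: it marks errors
// (network failures, HTTP 5xx) that are safe to retry.
type RecoverableError struct{ error }

// store is a stand-in for client.Store; here it always fails recoverably.
func store() error {
	return RecoverableError{errors.New("server returned HTTP status 500")}
}

func main() {
	for attempt := 0; attempt < 3; attempt++ {
		err := store()
		if err == nil {
			return
		}
		if _, ok := err.(RecoverableError); !ok {
			fmt.Println("unrecoverable, not retrying:", err)
			return
		}
		time.Sleep(100 * time.Millisecond << attempt) // crude exponential backoff
	}
	fmt.Println("retries exhausted")
}
```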

View file

@ -49,7 +49,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
},
{
code: 500,
err: recoverableError{errors.New("server returned HTTP status 500 Internal Server Error: " + longErrMessage[:maxErrMsgLen])},
err: RecoverableError{errors.New("server returned HTTP status 500 Internal Server Error: " + longErrMessage[:maxErrMsgLen])},
},
}

View file

@ -15,15 +15,14 @@ package remote
import (
"sync"
"sync/atomic"
"time"
"go.uber.org/atomic"
)
// ewmaRate tracks an exponentially weighted moving average of a per-second rate.
type ewmaRate struct {
// Keep all 64bit atomically accessed variables at the top of this struct.
// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more info.
newEvents int64
newEvents atomic.Int64
alpha float64
interval time.Duration
@ -50,7 +49,7 @@ func (r *ewmaRate) rate() float64 {
// tick assumes to be called every r.interval.
func (r *ewmaRate) tick() {
newEvents := atomic.SwapInt64(&r.newEvents, 0)
newEvents := r.newEvents.Swap(0)
instantRate := float64(newEvents) / r.interval.Seconds()
r.mutex.Lock()
@ -66,5 +65,5 @@ func (r *ewmaRate) tick() {
// incr counts the given number of events.
func (r *ewmaRate) incr(incr int64) {
atomic.AddInt64(&r.newEvents, incr)
r.newEvents.Add(incr)
}
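A standalone sketch of the smoothing `tick` performs: each interval, the instantaneous per-second rate is folded into the running rate with weight alpha (the initialization flag of the real `ewmaRate` is omitted):

```go
package main

import "fmt"

func main() {
	const alpha = 0.2
	const intervalSeconds = 10.0
	rate := 0.0
	for _, newEvents := range []int64{100, 300, 0, 200} {
		instant := float64(newEvents) / intervalSeconds
		rate += alpha * (instant - rate) // same update as ewmaRate.tick
		fmt.Printf("instant=%.1f/s smoothed=%.2f/s\n", instant, rate)
	}
}
```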

View file

@ -20,7 +20,8 @@ package remote
import (
"sync"
"sync/atomic"
"go.uber.org/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@ -40,13 +41,15 @@ type pool struct {
}
type entry struct {
// Keep all 64bit atomically accessed variables at the top of this struct.
// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more info.
refs int64
refs atomic.Int64
s string
}
func newEntry(s string) *entry {
return &entry{s: s}
}
func newPool() *pool {
return &pool{
pool: map[string]*entry{},
@ -62,20 +65,18 @@ func (p *pool) intern(s string) string {
interned, ok := p.pool[s]
p.mtx.RUnlock()
if ok {
atomic.AddInt64(&interned.refs, 1)
interned.refs.Inc()
return interned.s
}
p.mtx.Lock()
defer p.mtx.Unlock()
if interned, ok := p.pool[s]; ok {
atomic.AddInt64(&interned.refs, 1)
interned.refs.Inc()
return interned.s
}
p.pool[s] = &entry{
s: s,
refs: 1,
}
p.pool[s] = newEntry(s)
p.pool[s].refs.Store(1)
return s
}
@ -89,14 +90,14 @@ func (p *pool) release(s string) {
return
}
refs := atomic.AddInt64(&interned.refs, -1)
refs := interned.refs.Dec()
if refs > 0 {
return
}
p.mtx.Lock()
defer p.mtx.Unlock()
if atomic.LoadInt64(&interned.refs) != 0 {
if interned.refs.Load() != 0 {
return
}
delete(p.pool, s)
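A self-contained sketch of the interner's reference-counted lifecycle, stripped of metrics and the double-checked read fast path (illustrative only):

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

type entry struct {
	refs atomic.Int64
	s    string
}

type pool struct {
	mtx  sync.Mutex
	pool map[string]*entry
}

func (p *pool) intern(s string) string {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if e, ok := p.pool[s]; ok {
		e.refs.Inc()
		return e.s // reuse the interned backing string
	}
	e := &entry{s: s}
	e.refs.Store(1)
	p.pool[s] = e
	return s
}

func (p *pool) release(s string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if e, ok := p.pool[s]; ok && e.refs.Dec() == 0 {
		delete(p.pool, s) // last reference gone; drop the entry
	}
}

func main() {
	p := &pool{pool: map[string]*entry{}}
	a := p.intern("job")
	b := p.intern("job") // refs == 2, same backing string
	fmt.Println(a == b)
	p.release("job")
	p.release("job") // refs hit 0; entry removed
}
```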

View file

@ -20,7 +20,6 @@ package remote
import (
"fmt"
"sync/atomic"
"testing"
"time"
@ -33,7 +32,7 @@ func TestIntern(t *testing.T) {
interned, ok := interner.pool[testString]
testutil.Equals(t, true, ok)
testutil.Assert(t, interned.refs == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
func TestIntern_MultiRef(t *testing.T) {
@ -43,13 +42,13 @@ func TestIntern_MultiRef(t *testing.T) {
interned, ok := interner.pool[testString]
testutil.Equals(t, true, ok)
testutil.Assert(t, interned.refs == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.intern(testString)
interned, ok = interner.pool[testString]
testutil.Equals(t, true, ok)
testutil.Assert(t, interned.refs == 2, fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 2, fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
}
func TestIntern_DeleteRef(t *testing.T) {
@ -59,7 +58,7 @@ func TestIntern_DeleteRef(t *testing.T) {
interned, ok := interner.pool[testString]
testutil.Equals(t, true, ok)
testutil.Assert(t, interned.refs == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.release(testString)
_, ok = interner.pool[testString]
@ -72,7 +71,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
testutil.Equals(t, true, ok)
testutil.Assert(t, interned.refs == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
go interner.release(testString)
@ -84,5 +83,5 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interned, ok = interner.pool[testString]
interner.mtx.RUnlock()
testutil.Equals(t, true, ok)
testutil.Assert(t, atomic.LoadInt64(&interned.refs) == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs))
testutil.Assert(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}

View file

@ -18,7 +18,6 @@ import (
"math"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/go-kit/kit/log"
@ -27,6 +26,7 @@ import (
"github.com/golang/snappy"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"go.uber.org/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
@ -235,8 +235,7 @@ type WriteClient interface {
// indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher.
type QueueManager struct {
// https://golang.org/pkg/sync/atomic/#pkg-note-BUG
lastSendTimestamp int64
lastSendTimestamp atomic.Int64
logger log.Logger
flushDeadline time.Duration
@ -537,7 +536,7 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
// We shouldn't reshard if Prometheus hasn't been able to send to the
// remote endpoint successfully within some period of time.
minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix()
lsts := atomic.LoadInt64(&t.lastSendTimestamp)
lsts := t.lastSendTimestamp.Load()
if lsts < minSendTimestamp {
level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp)
return false
@ -663,7 +662,7 @@ type shards struct {
// Emulate a wait group with a channel and an atomic int, as you
// cannot select on a wait group.
done chan struct{}
running int32
running atomic.Int32
// Soft shutdown context will prevent new enqueues and deadlocks.
softShutdown chan struct{}
@ -671,7 +670,7 @@ type shards struct {
// Hard shutdown context is used to terminate outgoing HTTP connections
// after giving them a chance to terminate.
hardShutdown context.CancelFunc
droppedOnHardShutdown uint32
droppedOnHardShutdown atomic.Uint32
}
// start the shards; must be called before any call to enqueue.
@ -692,9 +691,9 @@ func (s *shards) start(n int) {
var hardShutdownCtx context.Context
hardShutdownCtx, s.hardShutdown = context.WithCancel(context.Background())
s.softShutdown = make(chan struct{})
s.running = int32(n)
s.running.Store(int32(n))
s.done = make(chan struct{})
atomic.StoreUint32(&s.droppedOnHardShutdown, 0)
s.droppedOnHardShutdown.Store(0)
for i := 0; i < n; i++ {
go s.runShard(hardShutdownCtx, i, newQueues[i])
}
@ -727,7 +726,7 @@ func (s *shards) stop() {
// Force an unclean shutdown.
s.hardShutdown()
<-s.done
if dropped := atomic.LoadUint32(&s.droppedOnHardShutdown); dropped > 0 {
if dropped := s.droppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
}
}
@ -756,7 +755,7 @@ func (s *shards) enqueue(ref uint64, sample sample) bool {
func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
defer func() {
if atomic.AddInt32(&s.running, -1) == 0 {
if s.running.Dec() == 0 {
close(s.done)
}
}()
@ -792,7 +791,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
droppedSamples := nPending + len(queue)
s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
atomic.AddUint32(&s.droppedOnHardShutdown, uint32(droppedSamples))
s.droppedOnHardShutdown.Add(uint32(droppedSamples))
return
case sample, ok := <-queue:
@ -847,7 +846,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, b
// should be maintained irrespective of success or failure.
s.qm.samplesOut.incr(int64(len(samples)))
s.qm.samplesOutDuration.incr(int64(time.Since(begin)))
atomic.StoreInt64(&s.qm.lastSendTimestamp, time.Now().Unix())
s.qm.lastSendTimestamp.Store(time.Now().Unix())
}
// sendSamples to the remote storage with backoff for recoverable errors.
@ -902,7 +901,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
if err != nil {
// If the error is unrecoverable, we should not retry.
if _, ok := err.(recoverableError); !ok {
if _, ok := err.(RecoverableError); !ok {
return err
}
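Elsewhere in this file, `shards` emulates a wait group with an atomic counter and a `done` channel, because a channel can participate in a `select` during hard shutdown while a `sync.WaitGroup` cannot. A minimal sketch of that pattern:

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	const n = 4
	var running atomic.Int32
	running.Store(n)
	done := make(chan struct{})

	for i := 0; i < n; i++ {
		go func() {
			defer func() {
				if running.Dec() == 0 {
					close(done) // last worker out closes done, like runShard
				}
			}()
			time.Sleep(10 * time.Millisecond) // simulated work
		}()
	}

	select { // unlike sync.WaitGroup, a channel can sit in a select
	case <-done:
		fmt.Println("all shards stopped")
	case <-time.After(time.Second):
		fmt.Println("timeout waiting for shards")
	}
}
```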

View file

@ -24,13 +24,13 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/go-kit/kit/log"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"go.uber.org/atomic"
"github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
@ -336,7 +336,7 @@ func TestShouldReshard(t *testing.T) {
m.numShards = c.startingShards
m.samplesIn.incr(c.samplesIn)
m.samplesOut.incr(c.samplesOut)
m.lastSendTimestamp = c.lastSendTimestamp
m.lastSendTimestamp.Store(c.lastSendTimestamp)
m.Start()
@ -497,7 +497,7 @@ func (c *TestWriteClient) Endpoint() string {
// point the `numCalls` property will contain a count of how many times Store()
// was called.
type TestBlockingWriteClient struct {
numCalls uint64
numCalls atomic.Uint64
}
func NewTestBlockedWriteClient() *TestBlockingWriteClient {
@ -505,13 +505,13 @@ func NewTestBlockedWriteClient() *TestBlockingWriteClient {
}
func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte) error {
atomic.AddUint64(&c.numCalls, 1)
c.numCalls.Inc()
<-ctx.Done()
return nil
}
func (c *TestBlockingWriteClient) NumCalls() uint64 {
return atomic.LoadUint64(&c.numCalls)
return c.numCalls.Load()
}
func (c *TestBlockingWriteClient) Name() string {
@ -667,7 +667,7 @@ func TestCalculateDesiredShards(t *testing.T) {
highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second)
m.metrics.highestSentTimestamp.Set(float64(highestSent.Unix()))
atomic.StoreInt64(&m.lastSendTimestamp, time.Now().Unix())
m.lastSendTimestamp.Store(time.Now().Unix())
}
ts := time.Duration(0)

View file

@ -25,12 +25,12 @@ import (
"sort"
"strconv"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"go.uber.org/atomic"
)
// Head chunk file header fields constants.
@ -78,9 +78,7 @@ func (e *CorruptionErr) Error() string {
// ChunkDiskMapper is for writing the Head block chunks to the disk
// and access chunks via mmapped file.
type ChunkDiskMapper struct {
// Keep all 64bit atomically accessed variables at the top of this struct.
// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more info.
curFileNumBytes int64 // Bytes written in current open file.
curFileNumBytes atomic.Int64 // Bytes written in current open file.
/// Writer.
dir *os.File
@ -343,7 +341,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
}()
cdm.size += cdm.curFileSize()
atomic.StoreInt64(&cdm.curFileNumBytes, int64(n))
cdm.curFileNumBytes.Store(int64(n))
if cdm.curFile != nil {
cdm.readPathMtx.Lock()
@ -394,7 +392,7 @@ func (cdm *ChunkDiskMapper) finalizeCurFile() error {
func (cdm *ChunkDiskMapper) write(b []byte) error {
n, err := cdm.chkWriter.Write(b)
atomic.AddInt64(&cdm.curFileNumBytes, int64(n))
cdm.curFileNumBytes.Add(int64(n))
return err
}
@ -736,7 +734,7 @@ func (cdm *ChunkDiskMapper) Size() int64 {
}
func (cdm *ChunkDiskMapper) curFileSize() int64 {
return atomic.LoadInt64(&cdm.curFileNumBytes)
return cdm.curFileNumBytes.Load()
}
// Close closes all the open files in ChunkDiskMapper.

View file

@ -762,7 +762,7 @@ func (db *DB) Compact() (err error) {
break
}
mint := db.head.MinTime()
maxt := rangeForTimestamp(mint, db.head.chunkRange)
maxt := rangeForTimestamp(mint, db.head.chunkRange.Load())
// Wrap head into a range that bounds all reads to it.
// We remove 1 millisecond from maxt because block

View file

@ -587,7 +587,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
testutil.Ok(t, err)
// Hackily introduce a "race" by having a lower max time than the maxTime in the last chunk.
db.head.maxTime = db.head.maxTime - 10
db.head.maxTime.Sub(10)
defer func() {
testutil.Ok(t, os.RemoveAll(snap))

View file

@@ -22,7 +22,6 @@ import (
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-kit/kit/log"
@@ -39,6 +38,7 @@ import (
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wal"
"go.uber.org/atomic"
)
var (
@@ -52,13 +52,11 @@ var (
// Head handles reads and writes of time series data within a time window.
type Head struct {
// Keep all 64bit atomically accessed variables at the top of this struct.
// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more info.
chunkRange int64
numSeries uint64
minTime, maxTime int64 // Current min and max of the samples included in the head.
minValidTime int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block.
lastSeriesID uint64
chunkRange atomic.Int64
numSeries atomic.Uint64
minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head.
minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block.
lastSeriesID atomic.Uint64
metrics *headMetrics
wal *wal.WAL
@@ -303,9 +301,6 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int
h := &Head{
wal: wal,
logger: l,
chunkRange: chunkRange,
minTime: math.MaxInt64,
maxTime: math.MinInt64,
series: newStripeSeries(stripeSize, seriesCallback),
values: map[string]stringset{},
symbols: map[string]struct{}{},
@@ -321,6 +316,9 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int
chunkDirRoot: chkDirRoot,
seriesCallback: seriesCallback,
}
h.chunkRange.Store(chunkRange)
h.minTime.Store(math.MaxInt64)
h.maxTime.Store(math.MinInt64)
h.metrics = newHeadMetrics(h, r)
if pool == nil {
@@ -390,7 +388,7 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) {
if mint >= lt {
break
}
if atomic.CompareAndSwapInt64(&h.minTime, lt, mint) {
if h.minTime.CAS(lt, mint) {
break
}
}
@@ -399,7 +397,7 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) {
if maxt <= ht {
break
}
if atomic.CompareAndSwapInt64(&h.maxTime, ht, maxt) {
if h.maxTime.CAS(ht, maxt) {
break
}
}
@@ -408,7 +406,7 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) {
func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks map[uint64][]*mmappedChunk) (err error) {
// Track number of samples that referenced a series we don't know about
// for error reporting.
var unknownRefs uint64
var unknownRefs atomic.Uint64
// Start workers that each process samples for a partition of the series ID space.
// They are connected through a ring of channels which ensures that all sample batches
@@ -460,8 +458,8 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
inputs[i] = make(chan []record.RefSample, 300)
go func(input <-chan []record.RefSample, output chan<- []record.RefSample) {
unknown := h.processWALSamples(h.minValidTime, input, output)
atomic.AddUint64(&unknownRefs, unknown)
unknown := h.processWALSamples(h.minValidTime.Load(), input, output)
unknownRefs.Add(unknown)
wg.Done()
}(inputs[i], outputs[i])
}
@@ -547,8 +545,8 @@ Outer:
multiRef[s.Ref] = series.ref
}
if h.lastSeriesID < s.Ref {
h.lastSeriesID = s.Ref
if h.lastSeriesID.Load() < s.Ref {
h.lastSeriesID.Store(s.Ref)
}
}
//lint:ignore SA6002 relax staticcheck verification.
@@ -589,11 +587,11 @@ Outer:
case []tombstones.Stone:
for _, s := range v {
for _, itv := range s.Intervals {
if itv.Maxt < h.minValidTime {
if itv.Maxt < h.minValidTime.Load() {
continue
}
if m := h.series.getByID(s.Ref); m == nil {
unknownRefs++
unknownRefs.Inc()
continue
}
h.tombstones.AddInterval(s.Ref, itv)
@@ -628,7 +626,7 @@ Outer:
return errors.Wrap(r.Err(), "read records")
}
if unknownRefs > 0 {
if unknownRefs.Load() > 0 {
level.Warn(h.logger).Log("msg", "Unknown series references", "count", unknownRefs)
}
return nil
@@ -638,7 +636,7 @@ Outer:
// It should be called before using an appender so that it
// limits the ingested samples to the head min valid time.
func (h *Head) Init(minValidTime int64) error {
h.minValidTime = minValidTime
h.minValidTime.Store(minValidTime)
defer h.postings.EnsureOrder()
defer h.gc() // After loading the wal remove the obsolete data from the head.
@@ -730,7 +728,7 @@ func (h *Head) Init(minValidTime int64) error {
func (h *Head) loadMmappedChunks() (map[uint64][]*mmappedChunk, error) {
mmappedChunks := map[uint64][]*mmappedChunk{}
if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error {
if maxt < h.minValidTime {
if maxt < h.minValidTime.Load() {
return nil
}
@@ -787,12 +785,12 @@ func (h *Head) Truncate(mint int64) (err error) {
if h.MinTime() >= mint && !initialize {
return nil
}
atomic.StoreInt64(&h.minTime, mint)
atomic.StoreInt64(&h.minValidTime, mint)
h.minTime.Store(mint)
h.minValidTime.Store(mint)
// Ensure that max time is at least as high as min time.
for h.MaxTime() < mint {
atomic.CompareAndSwapInt64(&h.maxTime, h.MaxTime(), mint)
h.maxTime.CAS(h.MaxTime(), mint)
}
// This was an initial call to Truncate after loading blocks on startup.
@@ -895,12 +893,12 @@ func (h *Head) Truncate(mint int64) (err error) {
// for a completely fresh head with an empty WAL.
// Returns true if the initialization took effect.
func (h *Head) initTime(t int64) (initialized bool) {
if !atomic.CompareAndSwapInt64(&h.minTime, math.MaxInt64, t) {
if !h.minTime.CAS(math.MaxInt64, t) {
return false
}
// Ensure that max time is initialized to at least the min time we just set.
// Concurrent appenders may already have set it to a higher value.
atomic.CompareAndSwapInt64(&h.maxTime, math.MinInt64, t)
h.maxTime.CAS(math.MinInt64, t)
return true
}
@@ -1031,7 +1029,7 @@ func (h *Head) appender() *headAppender {
head: h,
// Set the minimum valid time to whichever is greater, the head min valid time or the compaction window.
// This ensures that no samples will be added within the compaction window to avoid races.
minValidTime: max(atomic.LoadInt64(&h.minValidTime), h.MaxTime()-h.chunkRange/2),
minValidTime: max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2),
mint: math.MaxInt64,
maxt: math.MinInt64,
samples: h.getAppendBuffer(),
@@ -1321,9 +1319,7 @@ func (h *Head) gc() {
h.metrics.seriesRemoved.Add(float64(seriesRemoved))
h.metrics.chunksRemoved.Add(float64(chunksRemoved))
h.metrics.chunks.Sub(float64(chunksRemoved))
// Using AddUint64 to subtract series removed.
// See: https://golang.org/pkg/sync/atomic/#AddUint64.
atomic.AddUint64(&h.numSeries, ^uint64(seriesRemoved-1))
h.numSeries.Sub(uint64(seriesRemoved))
// Remove deleted series IDs from the postings lists.
h.postings.Delete(deleted)
@@ -1411,7 +1407,7 @@ func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkRead
// NumSeries returns the number of active series in the head.
func (h *Head) NumSeries() uint64 {
return atomic.LoadUint64(&h.numSeries)
return h.numSeries.Load()
}
// Meta returns meta information about the head.
@@ -1431,19 +1427,19 @@ func (h *Head) Meta() BlockMeta {
// MinTime returns the lowest time bound on visible data in the head.
func (h *Head) MinTime() int64 {
return atomic.LoadInt64(&h.minTime)
return h.minTime.Load()
}
// MaxTime returns the highest timestamp seen in data of the head.
func (h *Head) MaxTime() int64 {
return atomic.LoadInt64(&h.maxTime)
return h.maxTime.Load()
}
// compactable returns whether the head has a compactable range.
// The head has a compactable range when the head time range is 1.5 times the chunk range.
// The 0.5 acts as a buffer of the appendable window.
func (h *Head) compactable() bool {
return h.MaxTime()-h.MinTime() > h.chunkRange/2*3
return h.MaxTime()-h.MinTime() > h.chunkRange.Load()/2*3
}
// Close flushes the WAL and closes the head.
@@ -1697,13 +1693,13 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e
}
// Optimistically assume that we are the first one to create the series.
id := atomic.AddUint64(&h.lastSeriesID, 1)
id := h.lastSeriesID.Inc()
return h.getOrCreateWithID(id, hash, lset)
}
func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool, error) {
s := newMemSeries(lset, id, h.chunkRange, &h.memChunkPool)
s := newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool)
s, created, err := h.series.getOrSet(hash, s)
if err != nil {
@@ -1714,7 +1710,7 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie
}
h.metrics.seriesCreated.Inc()
atomic.AddUint64(&h.numSeries, 1)
h.numSeries.Inc()
h.symMtx.Lock()
defer h.symMtx.Unlock()
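
Throughout these head.go hunks, updateMinMaxTime keeps its lock-free retry loop, just re-spelled on the typed value. A standalone sketch of the same CAS pattern, assuming nothing beyond go.uber.org/atomic:

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// lowerTo lowers min to t unless a concurrent writer has already stored
// something smaller; this is the retry loop from updateMinMaxTime.
func lowerTo(min *atomic.Int64, t int64) {
	for {
		lt := min.Load()
		if t >= lt {
			return // current value is already low enough
		}
		if min.CAS(lt, t) {
			return // we won the race
		}
		// CAS lost to a concurrent writer: reload and retry.
	}
}

func main() {
	var min atomic.Int64
	min.Store(100)
	lowerTo(&min, 42)
	fmt.Println(min.Load()) // 42
}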

View file

@@ -17,9 +17,10 @@ import (
"io/ioutil"
"os"
"strconv"
"sync/atomic"
"testing"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -51,11 +52,11 @@ func BenchmarkHeadStripeSeriesCreateParallel(b *testing.B) {
testutil.Ok(b, err)
defer h.Close()
var count int64
var count atomic.Int64
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
i := atomic.AddInt64(&count, 1)
i := count.Inc()
h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(int(i))))
}
})

View file

@@ -235,7 +235,7 @@ func TestHead_ReadWAL(t *testing.T) {
populateTestWAL(t, w, entries)
testutil.Ok(t, head.Init(math.MinInt64))
testutil.Equals(t, uint64(101), head.lastSeriesID)
testutil.Equals(t, uint64(101), head.lastSeriesID.Load())
s10 := head.series.getByID(10)
s11 := head.series.getByID(11)
@@ -1723,16 +1723,16 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
testutil.Ok(t, err)
testutil.Ok(t, app.Commit())
testutil.Equals(t, int64(math.MinInt64), db.head.minValidTime)
testutil.Equals(t, int64(math.MinInt64), db.head.minValidTime.Load())
testutil.Ok(t, db.Compact())
testutil.Assert(t, db.head.minValidTime > 0, "")
testutil.Assert(t, db.head.minValidTime.Load() > 0, "")
app = db.Appender(ctx)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime-2, 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
testutil.Equals(t, storage.ErrOutOfBounds, err)
testutil.Equals(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime-1, 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()-1, 99)
testutil.Equals(t, storage.ErrOutOfBounds, err)
testutil.Equals(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples))
testutil.Ok(t, app.Commit())
@@ -1740,22 +1740,22 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
// Some more valid samples for out of order.
app = db.Appender(ctx)
for i := 1; i <= 5; i++ {
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime+DefaultBlockDuration+int64(i), 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+int64(i), 99)
testutil.Ok(t, err)
}
testutil.Ok(t, app.Commit())
// Test out of order metric.
app = db.Appender(ctx)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime+DefaultBlockDuration+2, 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+2, 99)
testutil.Equals(t, storage.ErrOutOfOrderSample, err)
testutil.Equals(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime+DefaultBlockDuration+3, 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+3, 99)
testutil.Equals(t, storage.ErrOutOfOrderSample, err)
testutil.Equals(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime+DefaultBlockDuration+4, 99)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+4, 99)
testutil.Equals(t, storage.ErrOutOfOrderSample, err)
testutil.Equals(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples))
testutil.Ok(t, app.Commit())

View file

@@ -25,9 +25,11 @@ package testutil
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
"go.uber.org/goleak"
)
// This package is imported by non-test code and therefore cannot import the
@@ -154,3 +156,14 @@ func formatMessage(msgAndArgs []interface{}) string {
}
return ""
}
// TolerantVerifyLeak verifies that there are no goroutine leaks, but excludes
// the goroutines that are launched as side effects of some of our dependencies.
func TolerantVerifyLeak(m *testing.M) {
goleak.VerifyTestMain(m,
// https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
// https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
)
}
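
A package whose dependency graph pulls in opencensus or klog would call the new helper from its TestMain; a usage sketch (package name hypothetical):

package somepkg

import (
	"testing"

	"github.com/prometheus/prometheus/util/testutil"
)

func TestMain(m *testing.M) {
	// Runs the tests, then fails the binary if goroutines other than
	// the allow-listed dependency workers are still alive.
	testutil.TolerantVerifyLeak(m)
}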

View file

@@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v44.0.0"
const Number = "v44.2.0"

View file

@@ -43,7 +43,7 @@ type Config struct {
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `nil` to use the default generated endpoint.
// to `nil` or the value to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
@@ -138,7 +138,7 @@ type Config struct {
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disble 100-Continue if you experience issues
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
@@ -183,7 +183,7 @@ type Config struct {
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDiableTimeoutOverride(true)))
// .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
@@ -194,7 +194,7 @@ type Config struct {
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requets. It is not recommended to set this value on the session
// to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//

View file

@@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
// Was any endpoint provided by the user, or one was derived by the
// SDK's endpoint resolver?
r.Error = aws.ErrMissingEndpoint
}
}}

View file

@@ -662,6 +662,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
@@ -2538,6 +2539,7 @@ var awsPartition = partition{
"firehose": service{
Endpoints: endpoints{
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -2547,6 +2549,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -2587,6 +2590,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
@@ -2688,11 +2692,12 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
"me-south-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"forecast": service{
@@ -3066,6 +3071,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
@@ -3787,19 +3793,7 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-1-fips": endpoint{
Hostname: "models-fips.lex.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"us-west-2": endpoint{},
"us-west-2-fips": endpoint{
Hostname: "models-fips.lex.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
"us-west-2": endpoint{},
},
},
"monitoring": service{
@@ -3997,6 +3991,12 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
"us-west-1": endpoint{
Hostname: "rds.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
},
"us-west-2": endpoint{
Hostname: "rds.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
@@ -4130,6 +4130,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -4603,6 +4604,7 @@ var awsPartition = partition{
Protocols: []string{"https"},
},
Endpoints: endpoints{
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@@ -4612,6 +4614,7 @@ var awsPartition = partition{
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-south-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
@@ -6418,6 +6421,19 @@ var awscnPartition = partition{
},
},
},
"ce": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
Endpoints: endpoints{
"aws-cn-global": endpoint{
Hostname: "ce.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
},
},
},
"cloudformation": service{
Endpoints: endpoints{
@@ -6480,6 +6496,12 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
"cur": service{
Endpoints: endpoints{
"cn-northwest-1": endpoint{},
},
},
"dax": service{
Endpoints: endpoints{
@@ -8962,6 +8984,20 @@ var awsisoPartition = partition{
"us-iso-east-1": endpoint{},
},
},
"transcribe": service{
Defaults: endpoint{
Protocols: []string{"https"},
},
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
},
},
"transcribestreaming": service{
Endpoints: endpoints{
"us-iso-east-1": endpoint{},
},
},
"workspaces": service{
Endpoints: endpoints{

View file

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.33.5"
const SDKVersion = "1.33.12"

View file

@@ -17464,7 +17464,9 @@ func (c *EC2) DescribeLaunchTemplateVersionsRequest(input *DescribeLaunchTemplat
// DescribeLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud.
//
// Describes one or more versions of a specified launch template. You can describe
// all versions, individual versions, or a range of versions.
// all versions, individual versions, or a range of versions. You can also describe
// all the latest versions or all the default versions of all the launch templates
// in your account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -28649,6 +28651,12 @@ func (c *EC2) GetLaunchTemplateDataRequest(input *GetLaunchTemplateDataInput) (r
// Retrieves the configuration data of the specified instance. You can use this
// data to create a launch template.
//
// This action calls on other describe actions to get instance information.
// Depending on your instance configuration, you may need to allow the following
// actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
// DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or,
// you can allow describe* depending on your instance requirements.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -34085,7 +34093,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
//
// Registers an AMI. When you're creating an AMI, this is the final step you
// must complete before you can launch an instance from the AMI. For more information
// about creating AMIs, see Creating Your Own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html)
// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// For Amazon EBS-backed instances, CreateImage creates and registers the AMI
@@ -34093,12 +34101,12 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
//
// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from
// a snapshot of a root device volume. You specify the snapshot using the block
// device mapping. For more information, see Launching a Linux Instance from
// a Backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html)
// device mapping. For more information, see Launching a Linux instance from
// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// You can't register an image where a secondary (non-root) snapshot has AWS
// Marketplace product codes.
// If any snapshots have AWS Marketplace product codes, they are copied to the
// new AMI.
//
// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL)
// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code
@@ -34119,7 +34127,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ
// a Reserved Instance without the matching billing product code, the Reserved
// Instance will not be applied to the On-Demand Instance. For information about
// how to obtain the platform details and billing information of an AMI, see
// Obtaining Billing Information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html)
// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// If needed, you can deregister an AMI at any time. Any modifications you make
@@ -38824,9 +38832,11 @@ type AllocateAddressInput struct {
// address from the address pool.
CustomerOwnedIpv4Pool *string `type:"string"`
// Set to vpc to allocate the address for use with instances in a VPC.
// Indicates whether the Elastic IP address is for use with instances in a VPC
// or instances in EC2-Classic.
//
// Default: The address is for use with instances in EC2-Classic.
// Default: If the Region supports EC2-Classic, the default is standard. Otherwise,
// the default is vpc.
Domain *string `type:"string" enum:"DomainType"`
// Checks whether you have the required permissions for the action, without
@@ -38915,8 +38925,8 @@ type AllocateAddressOutput struct {
// The ID of the customer-owned address pool.
CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"`
// Indicates whether this Elastic IP address is for use with instances in EC2-Classic
// (standard) or instances in a VPC (vpc).
// Indicates whether the Elastic IP address is for use with instances in a VPC
// (vpc) or instances in EC2-Classic (standard).
Domain *string `locationName:"domain" type:"string" enum:"DomainType"`
// The location from which the IP address is advertised.
@@ -40994,11 +41004,13 @@ type AuthorizeClientVpnIngressInput struct {
_ struct{} `type:"structure"`
// The ID of the group to grant access to, for example, the Active Directory
// group or identity provider (IdP) group.
// group or identity provider (IdP) group. Required if AuthorizeAllGroups is
// false or not specified.
AccessGroupId *string `type:"string"`
// Indicates whether to grant access to all clients. Use true to grant all clients
// who successfully establish a VPN connection access to the network.
// Indicates whether to grant access to all clients. Specify true to grant all
// clients who successfully establish a VPN connection access to the network.
// Must be set to true if AccessGroupId is not specified.
AuthorizeAllGroups *bool `type:"boolean"`
// Unique, case-sensitive identifier that you provide to ensure the idempotency
@@ -44230,6 +44242,9 @@ type CoipPool struct {
// The ID of the local gateway route table.
LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"`
// The ARN of the address pool.
PoolArn *string `locationName:"poolArn" min:"1" type:"string"`
// The address ranges of the address pool.
PoolCidrs []*string `locationName:"poolCidrSet" locationNameList:"item" type:"list"`
@@ -44256,6 +44271,12 @@ func (s *CoipPool) SetLocalGatewayRouteTableId(v string) *CoipPool {
return s
}
// SetPoolArn sets the PoolArn field's value.
func (s *CoipPool) SetPoolArn(v string) *CoipPool {
s.PoolArn = &v
return s
}
// SetPoolCidrs sets the PoolCidrs field's value.
func (s *CoipPool) SetPoolCidrs(v []*string) *CoipPool {
s.PoolCidrs = v
@@ -44376,7 +44397,8 @@ func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstance
type ConnectionLogOptions struct {
_ struct{} `type:"structure"`
// The name of the CloudWatch Logs log group.
// The name of the CloudWatch Logs log group. Required if connection logging
// is enabled.
CloudwatchLogGroup *string `type:"string"`
// The name of the CloudWatch Logs log stream to which the connection data is
@@ -48998,6 +49020,9 @@ type CreateRouteTableInput struct {
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
// The tags to assign to the route table.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The ID of the VPC.
//
// VpcId is a required field
@@ -49033,6 +49058,12 @@ func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput {
return s
}
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateRouteTableInput {
s.TagSpecifications = v
return s
}
// SetVpcId sets the VpcId field's value.
func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput {
s.VpcId = &v
@@ -51733,6 +51764,9 @@ type CreateVpcPeeringConnectionInput struct {
// You must specify this parameter in the request.
PeerVpcId *string `locationName:"peerVpcId" type:"string"`
// The tags to assign to the peering connection.
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The ID of the requester VPC. You must specify this parameter in the request.
VpcId *string `locationName:"vpcId" type:"string"`
}
@@ -51771,6 +51805,12 @@ func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeri
return s
}
// SetTagSpecifications sets the TagSpecifications field's value.
func (s *CreateVpcPeeringConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcPeeringConnectionInput {
s.TagSpecifications = v
return s
}
// SetVpcId sets the VpcId field's value.
func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput {
s.VpcId = &v
@@ -52082,12 +52122,12 @@ func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayO
return s
}
// Describes the credit option for CPU usage of a T2 or T3 instance.
// Describes the credit option for CPU usage of a T2, T3, or T3a instance.
type CreditSpecification struct {
_ struct{} `type:"structure"`
// The credit option for CPU usage of a T2 or T3 instance. Valid values are
// standard and unlimited.
// The credit option for CPU usage of a T2, T3, or T3a instance. Valid values
// are standard and unlimited.
CpuCredits *string `locationName:"cpuCredits" type:"string"`
}
@@ -52107,12 +52147,12 @@ func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification {
return s
}
// The credit option for CPU usage of a T2 or T3 instance.
// The credit option for CPU usage of a T2, T3, or T3a instance.
type CreditSpecificationRequest struct {
_ struct{} `type:"structure"`
// The credit option for CPU usage of a T2 or T3 instance. Valid values are
// standard and unlimited.
// The credit option for CPU usage of a T2, T3, or T3a instance. Valid values
// are standard and unlimited.
//
// CpuCredits is a required field
CpuCredits *string `type:"string" required:"true"`
@@ -56628,6 +56668,9 @@ type DescribeAvailabilityZonesInput struct {
//
// * opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required).
//
// * The ID of the zone that handles some of the Local Zone control plane
// operations, such as API calls.
//
// * region-name - The name of the Region for the Zone (for example, us-east-1).
//
// * state - The state of the Availability Zone or Local Zone (available
@@ -56636,8 +56679,12 @@ type DescribeAvailabilityZonesInput struct {
// * zone-id - The ID of the Availability Zone (for example, use1-az1) or
// the Local Zone (for example, use usw2-lax1-az1).
//
// * zone-type - The type of zone, for example, local-zone.
//
// * zone-name - The name of the Availability Zone (for example, us-east-1a)
// or the Local Zone (for example, use us-west-2-lax-1a).
//
// * zone-type - The type of zone, for example, local-zone.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The IDs of the Zones.
@@ -60474,11 +60521,13 @@ type DescribeImagesInput struct {
//
// * name - The name of the AMI (provided during image creation).
//
// * owner-alias - String value from an Amazon-maintained list (amazon |
// aws-marketplace | microsoft) of snapshot owners. Not to be confused with
// the user-configured AWS account alias, which is set from the IAM console.
// * owner-alias - The owner alias, from an Amazon-maintained list (amazon
// | aws-marketplace). This is not the user-configured AWS account alias
// set using the IAM console. We recommend that you use the related parameter
// instead of this filter.
//
// * owner-id - The AWS account ID of the image owner.
// * owner-id - The AWS account ID of the owner. We recommend that you use
// the related parameter instead of this filter.
//
// * platform - The platform. To only list Windows-based AMIs, use windows.
//
@@ -60520,10 +60569,10 @@ type DescribeImagesInput struct {
// Default: Describes all images available to you.
ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"`
// Filters the images by the owner. Specify an AWS account ID, self (owner is
// the sender of the request), or an AWS owner alias (valid values are amazon
// | aws-marketplace | microsoft). Omitting this option returns all images for
// which you have launch permissions, regardless of ownership.
// Scopes the results to images with the specified owners. You can specify a
// combination of AWS account IDs, self, amazon, and aws-marketplace. If you
// omit this parameter, the results include all images for which you have launch
// permissions, regardless of ownership.
Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
}
@@ -62367,12 +62416,12 @@ type DescribeLaunchTemplateVersionsInput struct {
// * ram-disk-id - The RAM disk ID.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The ID of the launch template. You must specify either the launch template
// ID or launch template name in the request.
// The ID of the launch template. To describe one or more versions of a specified
// launch template, you must specify either the launch template ID or the launch
// template name in the request. To describe all the latest or default launch
// template versions in your account, you must omit this parameter.
LaunchTemplateId *string `type:"string"`
// The name of the launch template. You must specify either the launch template
// ID or launch template name in the request.
// The name of the launch template. To describe one or more versions of a specified
// launch template, you must specify either the launch template ID or the launch
// template name in the request. To describe all the latest or default launch
// template versions in your account, you must omit this parameter.
LaunchTemplateName *string `min:"3" type:"string"`
// The maximum number of results to return in a single call. To retrieve the
@@ -62389,7 +62442,18 @@ type DescribeLaunchTemplateVersionsInput struct {
// The token to request the next page of results.
NextToken *string `type:"string"`
// One or more versions of the launch template.
// One or more versions of the launch template. Valid values depend on whether
// you are describing a specified launch template (by ID or name) or all launch
// templates in your account.
//
// To describe one or more versions of a specified launch template, valid values
// are $Latest, $Default, and numbers.
//
// To describe all launch templates in your account that are defined as the
// latest version, the valid value is $Latest. To describe all launch templates
// in your account that are defined as the default version, the valid value
// is $Default. You can specify $Latest and $Default in the same call. You cannot
// specify numbers.
Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list"`
}
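
Per the updated doc comment, omitting both LaunchTemplateId and LaunchTemplateName while passing $Latest describes the latest version of every launch template in the account. A sketch against the v1 SDK (region and error handling are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ec2.New(sess)

	// No template ID or name: with $Latest this spans every launch
	// template in the account.
	out, err := svc.DescribeLaunchTemplateVersions(&ec2.DescribeLaunchTemplateVersionsInput{
		Versions: aws.StringSlice([]string{"$Latest"}),
	})
	if err != nil {
		panic(err)
	}
	for _, v := range out.LaunchTemplateVersions {
		fmt.Println(aws.StringValue(v.LaunchTemplateName), aws.Int64Value(v.VersionNumber))
	}
}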
@@ -65435,8 +65499,6 @@ type DescribeRouteTablesInput struct {
// to find all resources assigned a tag with a specific key, regardless of
// the tag value.
//
// * transit-gateway-id - The ID of a transit gateway.
//
// * vpc-id - The ID of the VPC for the route table.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
@@ -97195,8 +97257,8 @@ type RequestLaunchTemplateData struct {
// in the Amazon Elastic Compute Cloud User Guide.
CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"`
// The credit option for CPU usage of the instance. Valid for T2 or T3 instances
// only.
// The credit option for CPU usage of the instance. Valid for T2, T3, or T3a
// instances only.
CreditSpecification *CreditSpecificationRequest `type:"structure"`
// If you set this parameter to true, you can't terminate the instance using
@@ -106068,8 +106130,8 @@ type TagSpecification struct {
// | dhcp-options | export-image-task | export-instance-task | fleet | fpga-image
// | host-reservation | import-image-task | import-snapshot-task | instance
// | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template
// | placement-group | prefix-list | launch-template | natgateway | network-acl
// | security-group | spot-fleet-request | snapshot | subnet | traffic-mirror-filter
// | placement-group | prefix-list | natgateway | network-acl | security-group
// | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter
// | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment
// | transit-gateway-route-table | volume |vpc | vpc-endpoint (for interface
// and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log.
@@ -108183,28 +108245,28 @@ type TransitGatewayRequestOptions struct {
// A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
// The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294
// for 32-bit ASNs.
// for 32-bit ASNs. The default is 64512.
AmazonSideAsn *int64 `type:"long"`
// Enable or disable automatic acceptance of attachment requests. The default
// is disable.
// Enable or disable automatic acceptance of attachment requests. Disabled by
// default.
AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"`
// Enable or disable automatic association with the default association route
// table. The default is enable.
// table. Enabled by default.
DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"`
// Enable or disable automatic propagation of routes to the default propagation
// route table. The default is enable.
// route table. Enabled by default.
DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"`
// Enable or disable DNS support.
// Enable or disable DNS support. Enabled by default.
DnsSupport *string `type:"string" enum:"DnsSupportValue"`
// Indicates whether multicast is enabled on the transit gateway
MulticastSupport *string `type:"string" enum:"MulticastSupportValue"`
// Enable or disable Equal Cost Multipath Protocol support.
// Enable or disable Equal Cost Multipath Protocol support. Enabled by default.
VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"`
}

View file

@@ -5,4 +5,4 @@
"issueSettings": {
"minSeverityLevel": "LOW"
}
}
}

View file

@@ -13,7 +13,7 @@ const oneClickBasePath = "v2/1-clicks"
// See: https://developers.digitalocean.com/documentation/v2/#1-click-applications
type OneClickService interface {
List(context.Context, string) ([]*OneClick, *Response, error)
InstallKubernetes(context.Context, *InstallKubernetesAppsRequest)(*InstallKubernetesAppsResponse, *Response, error)
InstallKubernetes(context.Context, *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error)
}
var _ OneClickService = &OneClickServiceOp{}
@@ -42,7 +42,7 @@ type InstallKubernetesAppsRequest struct {
// InstallKubernetesAppsResponse is the response of a kubernetes 1-click install request
type InstallKubernetesAppsResponse struct {
Message string `json:"message"`
Message string `json:"message"`
}
// List returns a list of the available 1-click applications.
@@ -64,8 +64,8 @@ func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]
}
// InstallKubernetes installs an addon on a kubernetes cluster
func (ocs *OneClickServiceOp) InstallKubernetes(ctx context.Context, install *InstallKubernetesAppsRequest ) (*InstallKubernetesAppsResponse, *Response, error) {
path := fmt.Sprintf(oneClickBasePath+"/kubernetes")
func (ocs *OneClickServiceOp) InstallKubernetes(ctx context.Context, install *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) {
path := fmt.Sprintf(oneClickBasePath + "/kubernetes")
req, err := ocs.client.NewRequest(ctx, http.MethodPost, path, install)
if err != nil {

View file

@@ -1,5 +1,23 @@
# Change Log
## [v1.42.0] - 2020-07-22
- #357 invoices: add category to InvoiceItem - @rbutler
- #358 apps: add support for following logs - @nanzhong
## [v1.41.0] - 2020-07-17
- #355 kubernetes: Add support for surge upgrades - @varshavaradarajan
## [v1.40.0] - 2020-07-16
- #347 Make Rate limits thread safe - @roidelapluie
- #353 Reuse TCP connection - @itsksaurabh
## [v1.39.0] - 2020-07-14
- #345, #346 Add app platform support [beta] - @nanzhong
## [v1.38.0] - 2020-06-18
- #341 Install 1-click applications on a Kubernetes cluster - @keladhruv

View file

@@ -25,7 +25,7 @@ go test -mod=vendor .
Godo follows [semver](https://www.semver.org) versioning semantics.
New functionality should be accompanied by increment to the minor
version number. Any code merged to master is subject to release.
version number. Any code merged to main is subject to release.
## Releasing

View file

@@ -98,9 +98,7 @@ func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, erro
}
// append the current page's droplets to our list
for _, d := range droplets {
list = append(list, d)
}
list = append(list, droplets...)
// if we are at the last page, break out the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
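
The replacement leans on Go's variadic append, which splices one slice onto another in a single call; a minimal illustration:

package main

import "fmt"

func main() {
	list := []int{1, 2}
	page := []int{3, 4}
	list = append(list, page...) // same result as the removed range loop
	fmt.Println(list)            // [1 2 3 4]
}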

vendor/github.com/digitalocean/godo/apps.gen.go (new generated vendored file, 136 lines)
View file

@@ -0,0 +1,136 @@
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
// $ bundle -pkg godo -prefix ./dev/dist/godo
package godo
import ()
// AppDatabaseSpec struct for AppDatabaseSpec
type AppDatabaseSpec struct {
Name string `json:"name"`
Engine AppDatabaseSpecEngine `json:"engine,omitempty"`
Version string `json:"version,omitempty"`
Size string `json:"size,omitempty"`
NumNodes int64 `json:"num_nodes,omitempty"`
}
// AppDatabaseSpecEngine the model 'AppDatabaseSpecEngine'
type AppDatabaseSpecEngine string
// List of AppDatabaseSpecEngine
const (
APPDATABASESPECENGINE_UNSET AppDatabaseSpecEngine = "UNSET"
APPDATABASESPECENGINE_MYSQL AppDatabaseSpecEngine = "MYSQL"
APPDATABASESPECENGINE_PG AppDatabaseSpecEngine = "PG"
APPDATABASESPECENGINE_REDIS AppDatabaseSpecEngine = "REDIS"
)
// AppDomainSpec struct for AppDomainSpec
type AppDomainSpec struct {
Domain string `json:"domain"`
}
// AppRouteSpec struct for AppRouteSpec
type AppRouteSpec struct {
Path string `json:"path,omitempty"`
}
// AppServiceSpec struct for AppServiceSpec
type AppServiceSpec struct {
Name string `json:"name"`
RunCommand string `json:"run_command,omitempty"`
BuildCommand string `json:"build_command,omitempty"`
HTTPPort int64 `json:"http_port,omitempty"`
DockerfilePath string `json:"dockerfile_path,omitempty"`
Git GitSourceSpec `json:"git,omitempty"`
GitHub GitHubSourceSpec `json:"github,omitempty"`
Envs []AppVariableDefinition `json:"envs,omitempty"`
InstanceSizeSlug string `json:"instance_size_slug,omitempty"`
InstanceCount int64 `json:"instance_count,omitempty"`
Routes []AppRouteSpec `json:"routes,omitempty"`
SourceDir string `json:"source_dir,omitempty"`
EnvironmentSlug string `json:"environment_slug,omitempty"`
}
// AppSpec struct for AppSpec
type AppSpec struct {
Services []AppServiceSpec `json:"services,omitempty"`
StaticSites []AppStaticSiteSpec `json:"static_sites,omitempty"`
Databases []AppDatabaseSpec `json:"databases,omitempty"`
Workers []AppWorkerSpec `json:"workers,omitempty"`
Region string `json:"region,omitempty"`
Name string `json:"name"`
Domains []AppDomainSpec `json:"domains,omitempty"`
}
// AppStaticSiteSpec struct for AppStaticSiteSpec
type AppStaticSiteSpec struct {
Name string `json:"name"`
BuildCommand string `json:"build_command,omitempty"`
Git GitSourceSpec `json:"git,omitempty"`
GitHub GitHubSourceSpec `json:"github,omitempty"`
Envs []AppVariableDefinition `json:"envs,omitempty"`
Routes []AppRouteSpec `json:"routes,omitempty"`
SourceDir string `json:"source_dir,omitempty"`
EnvironmentSlug string `json:"environment_slug,omitempty"`
}
// AppVariableDefinition struct for AppVariableDefinition
type AppVariableDefinition struct {
Value string `json:"value,omitempty"`
Scope VariableScope `json:"scope,omitempty"`
// POSIX allows a broader env var definition, but we restrict to what is allowed by bash. http://git.savannah.gnu.org/cgit/bash.git/tree/general.h?h=bash-5.0#n124 Based on the POSIX spec and some casting to unsigned char in bash code I think this is restricted to ASCII (not unicode).
Key string `json:"key"`
Type VariableType `json:"type,omitempty"`
EncryptedValue string `json:"encrypted_value,omitempty"`
}
// AppWorkerSpec struct for AppWorkerSpec
type AppWorkerSpec struct {
Name string `json:"name"`
RunCommand string `json:"run_command,omitempty"`
BuildCommand string `json:"build_command,omitempty"`
DockerfilePath string `json:"dockerfile_path,omitempty"`
Git GitSourceSpec `json:"git,omitempty"`
GitHub GitHubSourceSpec `json:"github,omitempty"`
Envs []AppVariableDefinition `json:"envs,omitempty"`
InstanceSizeSlug string `json:"instance_size_slug,omitempty"`
InstanceCount int64 `json:"instance_count,omitempty"`
SourceDir string `json:"source_dir,omitempty"`
EnvironmentSlug string `json:"environment_slug,omitempty"`
}
// GitHubSourceSpec struct for GitHubSourceSpec
type GitHubSourceSpec struct {
Repo string `json:"repo"`
Branch string `json:"branch"`
DeployOnPush bool `json:"deploy_on_push,omitempty"`
}
// GitSourceSpec struct for GitSourceSpec
type GitSourceSpec struct {
Repo string `json:"repo,omitempty"`
RequiresAuth bool `json:"requires_auth,omitempty"`
Branch string `json:"branch,omitempty"`
RepoCloneURL string `json:"repo_clone_url,omitempty"`
}
// VariableScope the model 'VariableScope'
type VariableScope string
// List of VariableScope
const (
VARIABLESCOPE_UNSET VariableScope = "UNSET"
VARIABLESCOPE_RUN_TIME VariableScope = "RUN_TIME"
VARIABLESCOPE_BUILD_TIME VariableScope = "BUILD_TIME"
VARIABLESCOPE_RUN_AND_BUILD_TIME VariableScope = "RUN_AND_BUILD_TIME"
)
// VariableType the model 'VariableType'
type VariableType string
// List of VariableType
const (
VARIABLETYPE_GENERAL VariableType = "GENERAL"
VARIABLETYPE_SECRET VariableType = "SECRET"
)

vendor/github.com/digitalocean/godo/apps.go (new vendored file, 278 lines)
View file

@@ -0,0 +1,278 @@
package godo
import (
"context"
"fmt"
"net/http"
"time"
)
const (
appsBasePath = "/v2/apps"
)
// AppLogType is the type of app logs.
type AppLogType string
const (
// AppLogTypeBuild represents build logs.
AppLogTypeBuild AppLogType = "BUILD"
// AppLogTypeDeploy represents deploy logs.
AppLogTypeDeploy AppLogType = "DEPLOY"
// AppLogTypeRun represents run logs.
AppLogTypeRun AppLogType = "RUN"
)
// AppsService is an interface for interfacing with the App Platform endpoints
// of the DigitalOcean API.
type AppsService interface {
Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error)
Get(ctx context.Context, appID string) (*App, *Response, error)
List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error)
Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error)
Delete(ctx context.Context, appID string) (*Response, error)
GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error)
ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error)
CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error)
GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool) (*AppLogs, *Response, error)
}
// App represents an app.
type App struct {
ID string `json:"id"`
Spec *AppSpec `json:"spec"`
DefaultIngress string `json:"default_ingress"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
ActiveDeployment *Deployment `json:"active_deployment,omitempty"`
InProgressDeployment *Deployment `json:"in_progress_deployment,omitempty"`
}
// Deployment represents a deployment for an app.
type Deployment struct {
ID string `json:"id"`
Spec *AppSpec `json:"spec"`
Services []*DeploymentService `json:"services,omitempty"`
Workers []*DeploymentWorker `json:"workers,omitempty"`
StaticSites []*DeploymentStaticSite `json:"static_sites,omitempty"`
Cause string `json:"cause"`
Progress *DeploymentProgress `json:"progress"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// DeploymentService represents a service component in a deployment.
type DeploymentService struct {
Name string `json:"name,omitempty"`
SourceCommitHash string `json:"source_commit_hash"`
}
// DeploymentWorker represents a worker component in a deployment.
type DeploymentWorker struct {
Name string `json:"name,omitempty"`
SourceCommitHash string `json:"source_commit_hash"`
}
// DeploymentStaticSite represents a static site component in a deployment.
type DeploymentStaticSite struct {
Name string `json:"name,omitempty"`
SourceCommitHash string `json:"source_commit_hash"`
}
// DeploymentProgress represents the total progress of a deployment.
type DeploymentProgress struct {
PendingSteps int `json:"pending_steps"`
RunningSteps int `json:"running_steps"`
SuccessSteps int `json:"success_steps"`
ErrorSteps int `json:"error_steps"`
TotalSteps int `json:"total_steps"`
Steps []*DeploymentProgressStep `json:"steps"`
}
// DeploymentProgressStep represents the progress of a deployment step.
type DeploymentProgressStep struct {
Name string `json:"name"`
Status string `json:"status"`
Steps []*DeploymentProgressStep `json:"steps,omitempty"`
Attempts uint32 `json:"attempts"`
StartedAt time.Time `json:"started_at,omitempty"`
EndedAt time.Time `json:"ended_at,omitempty"`
}
// AppLogs represent app logs.
type AppLogs struct {
LiveURL string `json:"live_url"`
HistoricURLs []string `json:"historic_urls"`
}
// AppCreateRequest represents a request to create an app.
type AppCreateRequest struct {
Spec *AppSpec `json:"spec"`
}
// AppUpdateRequest represents a request to update an app.
type AppUpdateRequest struct {
Spec *AppSpec `json:"spec"`
}
type appRoot struct {
App *App `json:"app"`
}
type appsRoot struct {
Apps []*App `json:"apps"`
}
type deploymentRoot struct {
Deployment *Deployment `json:"deployment"`
}
type deploymentsRoot struct {
Deployments []*Deployment `json:"deployments"`
}
// AppsServiceOp handles communication with Apps methods of the DigitalOcean API.
type AppsServiceOp struct {
client *Client
}
// Creates an app.
func (s *AppsServiceOp) Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) {
path := appsBasePath
req, err := s.client.NewRequest(ctx, http.MethodPost, path, create)
if err != nil {
return nil, nil, err
}
root := new(appRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.App, resp, nil
}
// Get an app.
func (s *AppsServiceOp) Get(ctx context.Context, appID string) (*App, *Response, error) {
path := fmt.Sprintf("%s/%s", appsBasePath, appID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(appRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.App, resp, nil
}
// List apps.
func (s *AppsServiceOp) List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) {
path := appsBasePath
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(appsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Apps, resp, nil
}
// Update an app.
func (s *AppsServiceOp) Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) {
path := fmt.Sprintf("%s/%s", appsBasePath, appID)
req, err := s.client.NewRequest(ctx, http.MethodPut, path, update)
if err != nil {
return nil, nil, err
}
root := new(appRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.App, resp, nil
}
// Delete an app.
func (s *AppsServiceOp) Delete(ctx context.Context, appID string) (*Response, error) {
path := fmt.Sprintf("%s/%s", appsBasePath, appID)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// GetDeployment gets an app deployment.
func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) {
path := fmt.Sprintf("%s/%s/deployments/%s", appsBasePath, appID, deploymentID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(deploymentRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Deployment, resp, nil
}
// ListDeployments lists an app's deployments.
func (s *AppsServiceOp) ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) {
path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(deploymentsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Deployments, resp, nil
}
// CreateDeployment creates an app deployment.
func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error) {
path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil)
if err != nil {
return nil, nil, err
}
root := new(deploymentRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Deployment, resp, nil
}
// GetLogs retrieves app logs.
func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool) (*AppLogs, *Response, error) {
url := fmt.Sprintf("%s/%s/deployments/%s/components/%s/logs?type=%s&follow=%t", appsBasePath, appID, deploymentID, component, logType, follow)
req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, nil, err
}
logs := new(AppLogs)
resp, err := s.client.Do(ctx, req, logs)
if err != nil {
return nil, resp, err
}
return logs, resp, nil
}
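
The new service hangs off the client the same way the existing ones do. A hypothetical read path, assuming an authenticated *godo.Client:

package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	// Real use would pass an oauth2-backed *http.Client; nil falls back
	// to http.DefaultClient and only works for unauthenticated calls.
	client := godo.NewClient(nil)
	ctx := context.Background()

	apps, _, err := client.Apps.List(ctx, nil)
	if err != nil {
		panic(err)
	}
	for _, app := range apps {
		fmt.Println(app.ID, app.Spec.Name, app.DefaultIngress)
	}
}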

View file

@@ -11,6 +11,7 @@ import (
"net/url"
"reflect"
"strconv"
"sync"
"time"
"github.com/google/go-querystring/query"
@@ -18,7 +19,7 @@ import (
)
const (
libraryVersion = "1.38.0"
libraryVersion = "1.42.0"
defaultBaseURL = "https://api.digitalocean.com/"
userAgent = "godo/" + libraryVersion
mediaType = "application/json"
@@ -40,12 +41,14 @@ type Client struct {
UserAgent string
// Rate contains the current rate limit for the client as determined by the most recent
// API call.
Rate Rate
// API call. It is not thread-safe. Please consider using GetRate() instead.
Rate Rate
ratemtx sync.Mutex
// Services used for communicating with the API
Account AccountService
Actions ActionsService
Apps AppsService
Balance BalanceService
BillingHistory BillingHistoryService
CDNs CDNService
@@ -186,6 +189,7 @@ func NewClient(httpClient *http.Client) *Client {
c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}
c.Account = &AccountServiceOp{client: c}
c.Actions = &ActionsServiceOp{client: c}
c.Apps = &AppsServiceOp{client: c}
c.Balance = &BalanceServiceOp{client: c}
c.BillingHistory = &BillingHistoryServiceOp{client: c}
c.CDNs = &CDNServiceOp{client: c}
@@ -286,6 +290,14 @@ func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) {
c.onRequestCompleted = rc
}
// GetRate returns the current rate limit for the client as determined by the most recent
// API call. It is thread-safe.
func (c *Client) GetRate() Rate {
c.ratemtx.Lock()
defer c.ratemtx.Unlock()
return c.Rate
}
// newResponse creates a new Response for the provided http.Response
func newResponse(r *http.Response) *Response {
response := Response{Response: r}
@@ -322,13 +334,26 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res
}
defer func() {
// Ensure the response body is fully read and closed
// before we reconnect, so that we reuse the same TCPconnection.
// Close the previous response's body. But read at least some of
// the body so if it's small the underlying TCP connection will be
// re-used. No need to check for errors: if it fails, the Transport
// won't reuse it anyway.
const maxBodySlurpSize = 2 << 10
if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize)
}
if rerr := resp.Body.Close(); err == nil {
err = rerr
}
}()
response := newResponse(resp)
c.ratemtx.Lock()
c.Rate = response.Rate
c.ratemtx.Unlock()
err = CheckResponse(resp)
if err != nil {
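
With Rate now documented as not thread-safe, concurrent readers should go through the new accessor; a hypothetical snippet:

package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewClient(nil) // real use would pass a token-bearing client

	// Any API call refreshes the stored rate information.
	_, _, _ = client.Account.Get(context.Background())

	// GetRate takes the internal mutex; reading client.Rate directly does not.
	rate := client.GetRate()
	fmt.Printf("%d/%d requests remaining\n", rate.Remaining, rate.Limit)
}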

View file

@@ -49,6 +49,7 @@ type InvoiceItem struct {
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
ProjectName string `json:"project_name"`
Category string `json:"category"`
}
// InvoiceList contains a paginated list of all of a customer's invoices.

View file

@@ -66,6 +66,7 @@ type KubernetesClusterCreateRequest struct {
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
AutoUpgrade bool `json:"auto_upgrade"`
SurgeUpgrade bool `json:"surge_upgrade"`
}
// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
@@ -74,6 +75,7 @@ type KubernetesClusterUpdateRequest struct {
Tags []string `json:"tags,omitempty"`
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade *bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
}
// KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster.
@@ -143,6 +145,7 @@ type KubernetesCluster struct {
MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
AutoUpgrade bool `json:"auto_upgrade,omitempty"`
SurgeUpgrade bool `json:"surge_upgrade,omitempty"`
Status *KubernetesClusterStatus `json:"status,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
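
A hedged sketch of opting a new cluster into surge upgrades; the cluster name, region, version, and node-pool values below are invented placeholders, while the request type and field names come from godo itself:

```go
package example

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

// createWithSurge is a sketch; all literal values are placeholders.
func createWithSurge(ctx context.Context, client *godo.Client) error {
	req := &godo.KubernetesClusterCreateRequest{
		Name:        "example-cluster",
		RegionSlug:  "nyc1",
		VersionSlug: "1.18.6-do.0",
		NodePools: []*godo.KubernetesNodePoolCreateRequest{
			{Name: "default", Size: "s-2vcpu-4gb", Count: 3},
		},
		SurgeUpgrade: true, // the new opt-in field
	}
	cluster, _, err := client.Kubernetes.Create(ctx, req)
	if err != nil {
		return err
	}
	fmt.Println("created:", cluster.ID)
	return nil
}
```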

View file

@ -1,6 +1,19 @@
Changes by Version
==================
2.25.0 (2020-07-13)
-------------------
## Breaking changes
- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
Re-resolving of the UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
or via the JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
## Bug fixes
- Do not add invalid context to references (#521) -- Yuri Shkuro
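
For readers upgrading, a minimal sketch of restoring the resolve-once behavior in code; the service name and agent address are placeholders:

```go
package example

import (
	"io"

	"github.com/uber/jaeger-client-go/config"
)

// newResolveOnceTracer opts out of the new periodic re-resolving.
func newResolveOnceTracer() (io.Closer, error) {
	cfg := &config.Configuration{
		ServiceName: "my-service", // placeholder
		Reporter: &config.ReporterConfig{
			LocalAgentHostPort:         "jaeger-agent:6831", // placeholder
			DisableAttemptReconnecting: true,                // restore resolve-once behavior
		},
	}
	_, closer, err := cfg.NewTracer()
	return closer, err
}
```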
2.24.0 (2020-06-14)
-------------------
- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher

View file

@ -142,10 +142,19 @@
version = "v0.0.5"
[[projects]]
digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a"
digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
name = "github.com/stretchr/objx"
packages = ["."]
pruneopts = "UT"
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
version = "v0.1.1"
[[projects]]
digest = "1:d88ba57c4e8f5db6ce9ab6605a89f4542ee751b576884ba5271c2ba3d4b6f2d2"
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock",
"require",
"suite",
]
@ -153,6 +162,42 @@
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
version = "v1.4.0"
[[projects]]
digest = "1:5b98956718573850caf7e0fd00b571a6657c4ef1f345ddf0c96b43ce355fe862"
name = "github.com/uber/jaeger-client-go"
packages = [
".",
"config",
"crossdock/client",
"crossdock/common",
"crossdock/endtoend",
"crossdock/log",
"crossdock/server",
"crossdock/thrift/tracetest",
"internal/baggage",
"internal/baggage/remote",
"internal/reporterstats",
"internal/spanlog",
"internal/throttler",
"internal/throttler/remote",
"log",
"log/zap/mock_opentracing",
"rpcmetrics",
"testutils",
"thrift",
"thrift-gen/agent",
"thrift-gen/baggage",
"thrift-gen/jaeger",
"thrift-gen/sampling",
"thrift-gen/zipkincore",
"transport",
"transport/zipkin",
"utils",
]
pruneopts = "UT"
revision = "66c008c3d6ad856cac92a0af53186efbffa8e6a5"
version = "v2.24.0"
[[projects]]
digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
name = "github.com/uber/jaeger-lib"
@ -314,8 +359,36 @@
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/mock",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
"github.com/uber/jaeger-client-go",
"github.com/uber/jaeger-client-go/config",
"github.com/uber/jaeger-client-go/crossdock/client",
"github.com/uber/jaeger-client-go/crossdock/common",
"github.com/uber/jaeger-client-go/crossdock/endtoend",
"github.com/uber/jaeger-client-go/crossdock/log",
"github.com/uber/jaeger-client-go/crossdock/server",
"github.com/uber/jaeger-client-go/crossdock/thrift/tracetest",
"github.com/uber/jaeger-client-go/internal/baggage",
"github.com/uber/jaeger-client-go/internal/baggage/remote",
"github.com/uber/jaeger-client-go/internal/reporterstats",
"github.com/uber/jaeger-client-go/internal/spanlog",
"github.com/uber/jaeger-client-go/internal/throttler",
"github.com/uber/jaeger-client-go/internal/throttler/remote",
"github.com/uber/jaeger-client-go/log",
"github.com/uber/jaeger-client-go/log/zap/mock_opentracing",
"github.com/uber/jaeger-client-go/rpcmetrics",
"github.com/uber/jaeger-client-go/testutils",
"github.com/uber/jaeger-client-go/thrift",
"github.com/uber/jaeger-client-go/thrift-gen/agent",
"github.com/uber/jaeger-client-go/thrift-gen/baggage",
"github.com/uber/jaeger-client-go/thrift-gen/jaeger",
"github.com/uber/jaeger-client-go/thrift-gen/sampling",
"github.com/uber/jaeger-client-go/thrift-gen/zipkincore",
"github.com/uber/jaeger-client-go/transport",
"github.com/uber/jaeger-client-go/transport/zipkin",
"github.com/uber/jaeger-client-go/utils",
"github.com/uber/jaeger-lib/metrics",
"github.com/uber/jaeger-lib/metrics/metricstest",
"github.com/uber/jaeger-lib/metrics/prometheus",

View file

@ -61,6 +61,8 @@ JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the coll
JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans: `true` or `false` (default `false`).
JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
JAEGER_SAMPLER_PARAM | The sampler parameter (number).
JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
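
A sketch of driving the two new variables through `config.FromEnv()`; the 10s interval is arbitrary, and in a real deployment these would be set in the environment rather than in code:

```go
package example

import (
	"os"

	"github.com/uber/jaeger-client-go/config"
)

func fromEnvExample() (*config.Configuration, error) {
	// Normally set in the deployment manifest; set here only for illustration.
	os.Setenv("JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED", "false")
	os.Setenv("JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL", "10s") // arbitrary value
	return config.FromEnv()
}
```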

View file

@ -22,6 +22,7 @@ import (
"time"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/utils"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/internal/baggage/remote"
@ -124,6 +125,17 @@ type ReporterConfig struct {
// Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
LocalAgentHostPort string `yaml:"localAgentHostPort"`
// DisableAttemptReconnecting, when true, disables the UDP connection helper that periodically re-resolves
// the agent's hostname and reconnects if there was a change. This option only
// applies if LocalAgentHostPort is specified.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
// AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
// in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
AttemptReconnectInterval time.Duration
// CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
// Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
CollectorEndpoint string `yaml:"collectorEndpoint"`
@ -384,7 +396,7 @@ func (rc *ReporterConfig) NewReporter(
metrics *jaeger.Metrics,
logger jaeger.Logger,
) (jaeger.Reporter, error) {
sender, err := rc.newTransport()
sender, err := rc.newTransport(logger)
if err != nil {
return nil, err
}
@ -401,7 +413,7 @@ func (rc *ReporterConfig) NewReporter(
return reporter, err
}
func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
switch {
case rc.CollectorEndpoint != "":
httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)}
@ -410,6 +422,13 @@ func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) {
}
return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
default:
return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0)
return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
AgentClientUDPParams: utils.AgentClientUDPParams{
HostPort: rc.LocalAgentHostPort,
Logger: logger,
DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
AttemptReconnectInterval: rc.AttemptReconnectInterval,
},
})
}
}

View file

@ -24,30 +24,31 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/uber/jaeger-client-go"
)
const (
// environment variable names
envServiceName = "JAEGER_SERVICE_NAME"
envDisabled = "JAEGER_DISABLED"
envRPCMetrics = "JAEGER_RPC_METRICS"
envTags = "JAEGER_TAGS"
envSamplerType = "JAEGER_SAMPLER_TYPE"
envSamplerParam = "JAEGER_SAMPLER_PARAM"
envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
envEndpoint = "JAEGER_ENDPOINT"
envUser = "JAEGER_USER"
envPassword = "JAEGER_PASSWORD"
envAgentHost = "JAEGER_AGENT_HOST"
envAgentPort = "JAEGER_AGENT_PORT"
envServiceName = "JAEGER_SERVICE_NAME"
envDisabled = "JAEGER_DISABLED"
envRPCMetrics = "JAEGER_RPC_METRICS"
envTags = "JAEGER_TAGS"
envSamplerType = "JAEGER_SAMPLER_TYPE"
envSamplerParam = "JAEGER_SAMPLER_PARAM"
envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
envEndpoint = "JAEGER_ENDPOINT"
envUser = "JAEGER_USER"
envPassword = "JAEGER_PASSWORD"
envAgentHost = "JAEGER_AGENT_HOST"
envAgentPort = "JAEGER_AGENT_PORT"
)
// FromEnv uses environment variables to set the tracer's Configuration
@ -206,6 +207,24 @@ func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
if useEnv || rc.LocalAgentHostPort == "" {
rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
}
if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
if value, err := strconv.ParseBool(e); err == nil {
rc.DisableAttemptReconnecting = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
}
}
if !rc.DisableAttemptReconnecting {
if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
if value, err := time.ParseDuration(e); err == nil {
rc.AttemptReconnectInterval = value
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
}
}
}
}
return rc, nil

View file

@ -22,7 +22,7 @@ import (
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
JaegerClientVersion = "Go-2.24.0"
JaegerClientVersion = "Go-2.25.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"

View file

@ -212,10 +212,14 @@ func (c SpanContext) SetFirehose() {
}
func (c SpanContext) String() string {
if c.traceID.High == 0 {
return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
var flags int32
if c.samplingState != nil {
flags = c.samplingState.stateFlags.Load()
}
return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
if c.traceID.High == 0 {
return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
}
return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
}
// ContextFromString reconstructs the Context encoded in a string
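
Since `ContextFromString` pairs with `String()`, a short sketch of the round-trip, plus the zero-value case this fix guards — a `SpanContext` without sampling state no longer dereferences nil:

```go
package example

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func contextStringExample() {
	// Round-trip: traceID:spanID:parentID:flags, all hex; values are arbitrary.
	ctx, err := jaeger.ContextFromString("deadbeef:cafe:0:1")
	if err != nil {
		panic(err)
	}
	// Prints the zero-padded form, e.g. "00000000deadbeef:000000000000cafe:0000000000000000:1".
	fmt.Println(ctx.String())

	// Zero-value context: samplingState is nil, so String previously
	// panicked; with this fix it prints zero flags instead.
	var empty jaeger.SpanContext
	fmt.Println(empty.String())
}
```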

View file

@ -216,10 +216,10 @@ func (t *Tracer) startSpanWithOptions(
options.StartTime = t.timeNow()
}
// Predicate whether the given span context is a valid reference
// which may be used as parent / debug ID / baggage items source
isValidReference := func(ctx SpanContext) bool {
return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0
// Predicate whether the given span context is an empty reference
// or may be used as parent / debug ID / baggage items source
isEmptyReference := func(ctx SpanContext) bool {
return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
}
var references []Reference
@ -235,7 +235,7 @@ func (t *Tracer) startSpanWithOptions(
reflect.ValueOf(ref.ReferencedContext)))
continue
}
if !isValidReference(ctxRef) {
if isEmptyReference(ctxRef) {
continue
}
@ -245,14 +245,17 @@ func (t *Tracer) startSpanWithOptions(
continue
}
references = append(references, Reference{Type: ref.Type, Context: ctxRef})
if ctxRef.IsValid() {
// we don't want an empty context that contains only debug-id or baggage
references = append(references, Reference{Type: ref.Type, Context: ctxRef})
}
if !hasParent {
parent = ctxRef
hasParent = ref.Type == opentracing.ChildOfRef
}
}
if !hasParent && isValidReference(parent) {
if !hasParent && !isEmptyReference(parent) {
// If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
// the FollowFromRef as the parent
hasParent = true

View file

@ -19,6 +19,7 @@ import (
"fmt"
"github.com/uber/jaeger-client-go/internal/reporterstats"
"github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift"
j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
"github.com/uber/jaeger-client-go/utils"
@ -57,35 +58,57 @@ type udpSender struct {
failedToEmitSpans int64
}
// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
// be passed to NewUDPTransportWithParams.
type UDPTransportParams struct {
utils.AgentClientUDPParams
}
// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent.
// TODO: (breaking change) move to transport/ package.
func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
if len(hostPort) == 0 {
hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
if len(params.HostPort) == 0 {
params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
}
if maxPacketSize == 0 {
maxPacketSize = utils.UDPPacketMaxLength
if params.Logger == nil {
params.Logger = log.StdLogger
}
if params.MaxPacketSize == 0 {
params.MaxPacketSize = utils.UDPPacketMaxLength
}
protocolFactory := thrift.NewTCompactProtocolFactory()
// Each span is first written to thriftBuffer to determine its size in bytes.
thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize)
client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
if err != nil {
return nil, err
}
return &udpSender{
client: client,
maxSpanBytes: maxPacketSize - emitBatchOverhead,
maxSpanBytes: params.MaxPacketSize - emitBatchOverhead,
thriftBuffer: thriftBuffer,
thriftProtocol: thriftProtocol,
}, nil
}
// NewUDPTransport creates a reporter that submits spans to jaeger-agent.
// TODO: (breaking change) move to transport/ package.
func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
return NewUDPTransportWithParams(UDPTransportParams{
AgentClientUDPParams: utils.AgentClientUDPParams{
HostPort: hostPort,
MaxPacketSize: maxPacketSize,
},
})
}
// SetReporterStats implements reporterstats.Receiver.
func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
s.reporterStats = rs

View file

@ -0,0 +1,189 @@
// Copyright (c) 2020 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"github.com/uber/jaeger-client-go/log"
)
// reconnectingUDPConn is an implementation of udpConn that re-resolves hostPort every resolveTimeout; if the resolved
// address differs from the current conn's, the new address is dialed and the conn is swapped.
type reconnectingUDPConn struct {
hostPort string
resolveFunc resolveFunc
dialFunc dialFunc
logger log.Logger
bufferBytes int64
connMtx sync.RWMutex
conn *net.UDPConn
destAddr *net.UDPAddr
closeChan chan struct{}
}
type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
// newReconnectingUDPConn returns a new udpConn that re-resolves hostPort every resolveTimeout; if the resolved
// address differs from the current conn's, the new address is dialed and the conn is swapped.
func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) {
conn := &reconnectingUDPConn{
hostPort: hostPort,
resolveFunc: resolveFunc,
dialFunc: dialFunc,
logger: logger,
closeChan: make(chan struct{}),
}
if err := conn.attemptResolveAndDial(); err != nil {
logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout))
}
go conn.reconnectLoop(resolveTimeout)
return conn, nil
}
func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
ticker := time.NewTicker(resolveTimeout)
defer ticker.Stop()
for {
select {
case <-c.closeChan:
return
case <-ticker.C:
if err := c.attemptResolveAndDial(); err != nil {
c.logger.Error(err.Error())
}
}
}
}
func (c *reconnectingUDPConn) attemptResolveAndDial() error {
newAddr, err := c.resolveFunc("udp", c.hostPort)
if err != nil {
return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
}
c.connMtx.RLock()
curAddr := c.destAddr
c.connMtx.RUnlock()
// don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn's
if curAddr != nil && newAddr.String() == curAddr.String() {
return nil
}
if err := c.attemptDialNewAddr(newAddr); err != nil {
return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
}
return nil
}
func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
if err != nil {
return err
}
if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
return err
}
}
c.connMtx.Lock()
c.destAddr = newAddr
// store prev to close later
prevConn := c.conn
c.conn = connUDP
c.connMtx.Unlock()
if prevConn != nil {
return prevConn.Close()
}
return nil
}
// Write calls net.udpConn.Write; if it fails, an attempt is made to connect to a new addr, and if that succeeds the write is retried before returning.
func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
var bytesWritten int
var err error
c.connMtx.RLock()
if c.conn == nil {
// if the connection is not initialized, indicate this with err in order to hook into the retry logic
err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
} else {
bytesWritten, err = c.conn.Write(b)
}
c.connMtx.RUnlock()
if err == nil {
return bytesWritten, nil
}
// attempt to resolve and dial a new address in case that's the problem; if that succeeds, try the write again
if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
c.connMtx.RLock()
defer c.connMtx.RUnlock()
return c.conn.Write(b)
}
// return the original error if the reconnect fails
return bytesWritten, err
}
// Close stops the reconnectLoop, then closes the connection via net.udpConn's implementation.
func (c *reconnectingUDPConn) Close() error {
close(c.closeChan)
// acquire rw lock before closing conn to ensure calls to Write drain
c.connMtx.Lock()
defer c.connMtx.Unlock()
if c.conn != nil {
return c.conn.Close()
}
return nil
}
// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with an RLock. If no conn is
// currently held when SetWriteBuffer is called, bufferBytes is stored and set on newly dialed conns.
func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
var err error
c.connMtx.RLock()
if c.conn != nil {
err = c.conn.SetWriteBuffer(bytes)
}
c.connMtx.RUnlock()
if err == nil {
atomic.StoreInt64(&c.bufferBytes, int64(bytes))
}
return err
}

View file

@ -19,7 +19,9 @@ import (
"fmt"
"io"
"net"
"time"
"github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift"
"github.com/uber/jaeger-client-go/thrift-gen/agent"
@ -35,41 +37,90 @@ type AgentClientUDP struct {
agent.Agent
io.Closer
connUDP *net.UDPConn
connUDP udpConn
client *agent.AgentClient
maxPacketSize int // max size of datagram in bytes
thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
}
// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
if maxPacketSize == 0 {
maxPacketSize = UDPPacketMaxLength
type udpConn interface {
Write([]byte) (int, error)
SetWriteBuffer(int) error
Close() error
}
// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should
// be passed to NewAgentClientUDPWithParams.
type AgentClientUDPParams struct {
HostPort string
MaxPacketSize int
Logger log.Logger
DisableAttemptReconnecting bool
AttemptReconnectInterval time.Duration
}
// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP.
func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) {
// validate hostport
if _, _, err := net.SplitHostPort(params.HostPort); err != nil {
return nil, err
}
thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize)
if params.MaxPacketSize == 0 {
params.MaxPacketSize = UDPPacketMaxLength
}
if params.Logger == nil {
params.Logger = log.StdLogger
}
if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 {
params.AttemptReconnectInterval = time.Second * 30
}
thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
protocolFactory := thrift.NewTCompactProtocolFactory()
client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
destAddr, err := net.ResolveUDPAddr("udp", hostPort)
if err != nil {
var connUDP udpConn
var err error
if params.DisableAttemptReconnecting {
destAddr, err := net.ResolveUDPAddr("udp", params.HostPort)
if err != nil {
return nil, err
}
connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
if err != nil {
return nil, err
}
} else {
// host is a hostname; set up a resolver loop in case the host record changes during operation
connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
if err != nil {
return nil, err
}
}
if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
return nil, err
}
connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr)
if err != nil {
return nil, err
}
if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil {
return nil, err
}
clientUDP := &AgentClientUDP{
return &AgentClientUDP{
connUDP: connUDP,
client: client,
maxPacketSize: maxPacketSize,
thriftBuffer: thriftBuffer}
return clientUDP, nil
maxPacketSize: params.MaxPacketSize,
thriftBuffer: thriftBuffer,
}, nil
}
// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
return NewAgentClientUDPWithParams(AgentClientUDPParams{
HostPort: hostPort,
MaxPacketSize: maxPacketSize,
})
}
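
A hedged sketch of constructing the client through the new params struct; the agent address and interval are placeholders, and zero-valued fields fall back to the defaults applied above:

```go
package example

import (
	"time"

	"github.com/uber/jaeger-client-go/utils"
)

func newAgentClient() (*utils.AgentClientUDP, error) {
	return utils.NewAgentClientUDPWithParams(utils.AgentClientUDPParams{
		HostPort:                 "jaeger-agent:6831", // placeholder hostname:port
		AttemptReconnectInterval: 10 * time.Second,    // placeholder; default is 30s
		// Logger and MaxPacketSize fall back to defaults when left zero.
	})
}
```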
// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface

View file

@ -325,6 +325,7 @@ includes_OpenBSD='
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/select.h>
#include <sys/sched.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
@ -507,6 +508,8 @@ ccflags="$@"
$2 ~ /^(CLOCK|TIMER)_/ ||
$2 ~ /^CAN_/ ||
$2 ~ /^CAP_/ ||
$2 ~ /^CP_/ ||
$2 ~ /^CPUSTATES$/ ||
$2 ~ /^ALG_/ ||
$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ ||
$2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ ||

View file

@ -527,6 +527,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) {
return &ci, nil
}
func SysctlTimeval(name string) (*Timeval, error) {
mib, err := sysctlmib(name)
if err != nil {
return nil, err
}
var tv Timeval
n := uintptr(unsafe.Sizeof(tv))
if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil {
return nil, err
}
if n != unsafe.Sizeof(tv) {
return nil, EIO
}
return &tv, nil
}
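
A sketch of the new helper in use; `kern.boottime` is an assumed example of a timeval-typed node available on the BSDs:

```go
package example

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// printBootTime compiles on the BSDs, where SysctlTimeval is defined.
func printBootTime() error {
	// kern.boottime is assumed here; it returns the boot time as a timeval.
	tv, err := unix.SysctlTimeval("kern.boottime")
	if err != nil {
		return err
	}
	fmt.Println("booted at:", time.Unix(int64(tv.Sec), int64(tv.Usec)*1000))
	return nil
}
```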
//sys utimes(path string, timeval *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error {

View file

@ -2122,6 +2122,18 @@ func Klogset(typ int, arg int) (err error) {
return nil
}
// RemoteIovec is Iovec with the pointer replaced with an integer.
// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer
// refers to a location in a different process' address space, which
// would confuse the Go garbage collector.
type RemoteIovec struct {
Base uintptr
Len int
}
//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV
//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV
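
A hedged sketch of the new wrappers; the pid and remote address are assumed inputs (e.g. from a debugger), and the call requires ptrace-level permissions:

```go
package example

import (
	"golang.org/x/sys/unix"
)

// readRemote is a sketch: pid and addr are assumed to come from elsewhere,
// n must be positive, and the caller needs CAP_SYS_PTRACE or an existing
// ptrace relationship with the target process.
func readRemote(pid int, addr uintptr, n int) ([]byte, error) {
	buf := make([]byte, n)
	local := unix.Iovec{Base: &buf[0]}
	local.SetLen(n)
	// RemoteIovec carries the foreign address as a plain uintptr so the
	// Go garbage collector never treats it as a local pointer.
	remote := unix.RemoteIovec{Base: addr, Len: n}
	read, err := unix.ProcessVMReadv(pid, []unix.Iovec{local}, []unix.RemoteIovec{remote}, 0)
	if err != nil {
		return nil, err
	}
	return buf[:read], nil
}
```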
/*
* Unimplemented
*/

View file

@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
CPUSTATES = 0x5
CP_IDLE = 0x4
CP_INTR = 0x3
CP_NICE = 0x1
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0

View file

@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
CPUSTATES = 0x5
CP_IDLE = 0x4
CP_INTR = 0x3
CP_NICE = 0x1
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0

View file

@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
CPUSTATES = 0x5
CP_IDLE = 0x4
CP_INTR = 0x3
CP_NICE = 0x1
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0

View file

@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
CPUSTATES = 0x5
CP_IDLE = 0x4
CP_INTR = 0x3
CP_NICE = 0x1
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0

View file

@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
CP_NICE = 0x1
CP_SPIN = 0x3
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0

View file

@ -153,6 +153,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
CP_NICE = 0x1
CP_SPIN = 0x3
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0

View file

@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
CP_NICE = 0x1
CP_SPIN = 0x3
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0

View file

@ -156,6 +156,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
CPUSTATES = 0x6
CP_IDLE = 0x5
CP_INTR = 0x4
CP_NICE = 0x1
CP_SPIN = 0x3
CP_SYS = 0x2
CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0

View file

@ -1847,6 +1847,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
var _p0 unsafe.Pointer
if len(localIov) > 0 {
_p0 = unsafe.Pointer(&localIov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
var _p1 unsafe.Pointer
if len(remoteIov) > 0 {
_p1 = unsafe.Pointer(&remoteIov[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
var _p0 unsafe.Pointer
if len(localIov) > 0 {
_p0 = unsafe.Pointer(&localIov[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
var _p1 unsafe.Pointer
if len(remoteIov) > 0 {
_p1 = unsafe.Pointer(&remoteIov[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {

View file

@ -873,6 +873,9 @@ const (
// FieldManagerConflict is used to report when another client claims to manage this field,
// It should only be returned for a request using server-side apply.
CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
// is newer than the data observed by the API server, so the request cannot be served.
CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View file

@ -446,7 +446,7 @@ redirectLoop:
// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
break redirectLoop
return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
}
// Reset the connection.

View file

@ -82,9 +82,9 @@ type Reflector struct {
// observed when doing a sync with the underlying store
// it is thread safe, but not synchronized with the underlying store
lastSyncResourceVersion string
// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
// failed with an HTTP 410 (Gone) status code.
isLastSyncResourceVersionGone bool
// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
isLastSyncResourceVersionUnavailable bool
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
@ -256,13 +256,14 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
list, paginatedResult, err = pager.List(context.Background(), options)
if isExpiredError(err) {
r.setIsLastSyncResourceVersionExpired(true)
// Retry immediately if the resource version used to list is expired.
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, or the full list might fail because the
// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
// to recover and ensure the reflector makes forward progress.
// continuation pages, but the pager might not be enabled, the full list might fail because the
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
@ -292,7 +293,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
r.paginatedResult = true
}
r.setIsLastSyncResourceVersionExpired(false) // list was successful
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
initTrace.Step("Objects listed")
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
@ -396,7 +397,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err != errorStopRequested {
switch {
case isExpiredError(err):
// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
// has a semantic that it returns data at least as fresh as provided RV.
// So first try to LIST with setting RV to resource version of last observed object.
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
@ -519,9 +520,9 @@ func (r *Reflector) relistResourceVersion() string {
r.lastSyncResourceVersionMutex.RLock()
defer r.lastSyncResourceVersionMutex.RUnlock()
if r.isLastSyncResourceVersionGone {
if r.isLastSyncResourceVersionUnavailable {
// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
// to the latest available ResourceVersion, using a consistent read from etcd.
return ""
}
@ -533,12 +534,12 @@ func (r *Reflector) relistResourceVersion() string {
return r.lastSyncResourceVersion
}
// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
// expired error: HTTP 410 (Gone) Status Code.
func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
// "expired" or "too large resource version" error.
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.isLastSyncResourceVersionGone = isExpired
r.isLastSyncResourceVersionUnavailable = isUnavailable
}
func isExpiredError(err error) bool {
@ -548,3 +549,7 @@ func isExpiredError(err error) bool {
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
}
func isTooLargeResourceVersionError(err error) bool {
return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}
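
A sketch of the same decision as seen from calling code; the helper name is invented, but the error predicates are apimachinery's own:

```go
package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shouldRelistFromScratch (a hypothetical name) mirrors the reflector's
// fallback decision: both expired and too-large resource versions trigger
// a relist at ResourceVersion="".
func shouldRelistFromScratch(err error) bool {
	return apierrors.IsResourceExpired(err) || apierrors.IsGone(err) ||
		apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}
```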

vendor/modules.txt
View file

@ -1,27 +1,35 @@
# cloud.google.com/go v0.56.0
cloud.google.com/go/compute/metadata
# github.com/Azure/azure-sdk-for-go v44.0.0+incompatible
# github.com/Azure/azure-sdk-for-go v44.2.0+incompatible
## explicit
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network
github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
## explicit
# github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest
# github.com/Azure/go-autorest/autorest v0.11.2
## explicit
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/azure
# github.com/Azure/go-autorest/autorest/adal v0.9.0
## explicit
github.com/Azure/go-autorest/autorest/adal
# github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/date
# github.com/Azure/go-autorest/autorest/to v0.3.0
## explicit
github.com/Azure/go-autorest/autorest/to
# github.com/Azure/go-autorest/autorest/validation v0.2.0
## explicit
github.com/Azure/go-autorest/autorest/validation
# github.com/Azure/go-autorest/logger v0.2.0
github.com/Azure/go-autorest/logger
# github.com/Azure/go-autorest/tracing v0.6.0
github.com/Azure/go-autorest/tracing
# github.com/Microsoft/go-winio v0.4.14
## explicit
github.com/Microsoft/go-winio
github.com/Microsoft/go-winio/pkg/guid
# github.com/PuerkitoBio/purell v1.1.1
@ -32,12 +40,15 @@ github.com/PuerkitoBio/urlesc
github.com/alecthomas/template
github.com/alecthomas/template/parse
# github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d
## explicit
github.com/alecthomas/units
# github.com/armon/go-metrics v0.3.3
## explicit
github.com/armon/go-metrics
# github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496
github.com/asaskevich/govalidator
# github.com/aws/aws-sdk-go v1.33.5
# github.com/aws/aws-sdk-go v1.33.12
## explicit
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
github.com/aws/aws-sdk-go/aws/awsutil
@ -78,24 +89,31 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
# github.com/cespare/xxhash v1.1.0
## explicit
github.com/cespare/xxhash
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
# github.com/containerd/containerd v1.3.4
## explicit
github.com/containerd/containerd/errdefs
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
# github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/dgrijalva/jwt-go
# github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b
## explicit
github.com/dgryski/go-sip13
# github.com/digitalocean/godo v1.38.0
# github.com/digitalocean/godo v1.42.0
## explicit
github.com/digitalocean/godo
# github.com/docker/distribution v2.7.1+incompatible
## explicit
github.com/docker/distribution/digestset
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
@ -115,12 +133,14 @@ github.com/docker/docker/api/types/volume
github.com/docker/docker/client
github.com/docker/docker/errdefs
# github.com/docker/go-connections v0.4.0
## explicit
github.com/docker/go-connections/nat
github.com/docker/go-connections/sockets
github.com/docker/go-connections/tlsconfig
# github.com/docker/go-units v0.4.0
github.com/docker/go-units
# github.com/edsrzf/mmap-go v1.0.0
## explicit
github.com/edsrzf/mmap-go
# github.com/evanphx/json-patch v4.2.0+incompatible
github.com/evanphx/json-patch
@ -129,9 +149,11 @@ github.com/fatih/color
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
# github.com/go-kit/kit v0.10.0
## explicit
github.com/go-kit/kit/log
github.com/go-kit/kit/log/level
# github.com/go-logfmt/logfmt v0.5.0
## explicit
github.com/go-logfmt/logfmt
# github.com/go-logr/logr v0.1.0
github.com/go-logr/logr
@ -151,6 +173,7 @@ github.com/go-openapi/runtime
# github.com/go-openapi/spec v0.19.8
github.com/go-openapi/spec
# github.com/go-openapi/strfmt v0.19.5
## explicit
github.com/go-openapi/strfmt
# github.com/go-openapi/swag v0.19.9
github.com/go-openapi/swag
@ -159,6 +182,7 @@ github.com/go-openapi/validate
# github.com/go-stack/stack v1.8.0
github.com/go-stack/stack
# github.com/gogo/protobuf v1.3.1
## explicit
github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/plugin/compare
github.com/gogo/protobuf/plugin/defaultcheck
@ -204,6 +228,7 @@ github.com/golang/protobuf/ptypes/struct
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
# github.com/golang/snappy v0.0.1
## explicit
github.com/golang/snappy
# github.com/google/go-cmp v0.4.0
github.com/google/go-cmp/cmp
@ -216,14 +241,17 @@ github.com/google/go-querystring/query
# github.com/google/gofuzz v1.1.0
github.com/google/gofuzz
# github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99
## explicit
github.com/google/pprof/profile
# github.com/googleapis/gax-go/v2 v2.0.5
github.com/googleapis/gax-go/v2
# github.com/googleapis/gnostic v0.4.0
## explicit
github.com/googleapis/gnostic/OpenAPIv2
github.com/googleapis/gnostic/compiler
github.com/googleapis/gnostic/extensions
# github.com/gophercloud/gophercloud v0.12.0
## explicit
github.com/gophercloud/gophercloud
github.com/gophercloud/gophercloud/openstack
github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips
@ -237,6 +265,7 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
github.com/gophercloud/gophercloud/openstack/utils
github.com/gophercloud/gophercloud/pagination
# github.com/grpc-ecosystem/grpc-gateway v1.14.6
## explicit
github.com/grpc-ecosystem/grpc-gateway/codegenerator
github.com/grpc-ecosystem/grpc-gateway/internal
github.com/grpc-ecosystem/grpc-gateway/internal/casing
@ -251,31 +280,39 @@ github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options
github.com/grpc-ecosystem/grpc-gateway/runtime
github.com/grpc-ecosystem/grpc-gateway/utilities
# github.com/hashicorp/consul/api v1.5.0
## explicit
github.com/hashicorp/consul/api
# github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-cleanhttp
# github.com/hashicorp/go-hclog v0.12.2
## explicit
github.com/hashicorp/go-hclog
# github.com/hashicorp/go-immutable-radix v1.2.0
## explicit
github.com/hashicorp/go-immutable-radix
# github.com/hashicorp/go-rootcerts v1.0.2
github.com/hashicorp/go-rootcerts
# github.com/hashicorp/golang-lru v0.5.4
## explicit
github.com/hashicorp/golang-lru
github.com/hashicorp/golang-lru/simplelru
# github.com/hashicorp/serf v0.9.0
github.com/hashicorp/serf/coordinate
# github.com/influxdata/influxdb v1.8.1
## explicit
github.com/influxdata/influxdb/client/v2
github.com/influxdata/influxdb/models
github.com/influxdata/influxdb/pkg/escape
# github.com/jmespath/go-jmespath v0.3.0
github.com/jmespath/go-jmespath
# github.com/jpillora/backoff v1.0.0
## explicit
github.com/jpillora/backoff
# github.com/json-iterator/go v1.1.10
## explicit
github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit
github.com/julienschmidt/httprouter
# github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
@ -284,45 +321,61 @@ github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
# github.com/mattn/go-colorable v0.1.6
## explicit
github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty
# github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/miekg/dns v1.1.30
## explicit
github.com/miekg/dns
# github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/go-homedir
# github.com/mitchellh/mapstructure v1.2.2
## explicit
github.com/mitchellh/mapstructure
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
# github.com/morikuni/aec v1.0.0
## explicit
# github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
## explicit
github.com/mwitkow/go-conntrack
# github.com/oklog/run v1.1.0
## explicit
github.com/oklog/run
# github.com/oklog/ulid v1.3.1
## explicit
github.com/oklog/ulid
# github.com/opencontainers/go-digest v1.0.0
## explicit
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.1
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opentracing-contrib/go-stdlib v1.0.0
## explicit
github.com/opentracing-contrib/go-stdlib/nethttp
# github.com/opentracing/opentracing-go v1.2.0
## explicit
github.com/opentracing/opentracing-go
github.com/opentracing/opentracing-go/ext
github.com/opentracing/opentracing-go/log
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/alertmanager v0.21.0
## explicit
github.com/prometheus/alertmanager/api/v2/models
# github.com/prometheus/client_golang v1.7.1
## explicit
github.com/prometheus/client_golang/api
github.com/prometheus/client_golang/api/prometheus/v1
github.com/prometheus/client_golang/prometheus
@ -332,8 +385,10 @@ github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/testutil
github.com/prometheus/client_golang/prometheus/testutil/promlint
# github.com/prometheus/client_model v0.2.0
## explicit
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.10.0
## explicit
github.com/prometheus/common/config
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
@ -347,19 +402,24 @@ github.com/prometheus/common/version
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da
# github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
## explicit
github.com/samuel/go-zookeeper/zk
# github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
## explicit
github.com/shurcooL/httpfs/filter
github.com/shurcooL/httpfs/union
github.com/shurcooL/httpfs/vfsutil
# github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
## explicit
github.com/shurcooL/vfsgen
# github.com/sirupsen/logrus v1.4.2
github.com/sirupsen/logrus
# github.com/soheilhy/cmux v0.1.4
## explicit
github.com/soheilhy/cmux
# github.com/uber/jaeger-client-go v2.24.0+incompatible
# github.com/uber/jaeger-client-go v2.25.0+incompatible
## explicit
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage
@ -379,9 +439,11 @@ github.com/uber/jaeger-client-go/thrift-gen/zipkincore
github.com/uber/jaeger-client-go/transport
github.com/uber/jaeger-client-go/utils
# github.com/uber/jaeger-lib v2.2.0+incompatible
## explicit
github.com/uber/jaeger-lib/metrics
github.com/uber/jaeger-lib/metrics/prometheus
# go.mongodb.org/mongo-driver v1.3.2
## explicit
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec
go.mongodb.org/mongo-driver/bson/bsonoptions
@ -407,8 +469,10 @@ go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.6.0
## explicit
go.uber.org/atomic
# go.uber.org/goleak v1.0.0
## explicit
go.uber.org/goleak
go.uber.org/goleak/internal/stack
# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
@ -422,6 +486,7 @@ golang.org/x/lint/golint
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20200707034311-ab3426394381
## explicit
golang.org/x/net/bpf
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@ -439,14 +504,17 @@ golang.org/x/net/netutil
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
## explicit
golang.org/x/oauth2
golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
## explicit
golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
# golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c
## explicit
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
@ -457,8 +525,10 @@ golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
## explicit
golang.org/x/time/rate
# golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1
# golang.org/x/tools v0.0.0-20200725200936-102e7d357031
## explicit
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
@ -475,6 +545,7 @@ golang.org/x/tools/internal/imports
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.29.0
## explicit
google.golang.org/api/compute/v1
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@ -487,6 +558,7 @@ google.golang.org/api/transport/cert
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
# google.golang.org/appengine v1.6.6
## explicit
google.golang.org/appengine
google.golang.org/appengine/internal
google.golang.org/appengine/internal/app_identity
@ -497,12 +569,14 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e
# google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7
## explicit
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.29.1
## explicit
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@ -583,16 +657,23 @@ google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
google.golang.org/protobuf/types/pluginpb
# gopkg.in/alecthomas/kingpin.v2 v2.2.6
## explicit
gopkg.in/alecthomas/kingpin.v2
# gopkg.in/fsnotify/fsnotify.v1 v1.4.7
## explicit
gopkg.in/fsnotify/fsnotify.v1
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
# gopkg.in/yaml.v2 v2.3.0
## explicit
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
## explicit
gopkg.in/yaml.v3
# k8s.io/api v0.18.5
# gotest.tools v2.2.0+incompatible
## explicit
# k8s.io/api v0.18.6
## explicit
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1beta1
k8s.io/api/apps/v1
@ -633,7 +714,8 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apimachinery v0.18.5
# k8s.io/apimachinery v0.18.6
## explicit
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
k8s.io/apimachinery/pkg/api/resource
@ -676,7 +758,8 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.18.5
# k8s.io/client-go v0.18.6
## explicit
k8s.io/client-go/discovery
k8s.io/client-go/discovery/fake
k8s.io/client-go/kubernetes
@ -782,12 +865,14 @@ k8s.io/client-go/util/flowcontrol
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/workqueue
# k8s.io/klog v1.0.0 => github.com/simonpasquier/klog-gokit v0.1.0
## explicit
k8s.io/klog
# k8s.io/klog/v2 v2.0.0
k8s.io/klog/v2
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/util/proto
# k8s.io/utils v0.0.0-20200414100711-2df71ebbae66
## explicit
k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/trace
@ -795,3 +880,4 @@ k8s.io/utils/trace
sigs.k8s.io/structured-merge-diff/v3/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
# k8s.io/klog => github.com/simonpasquier/klog-gokit v0.1.0

View file

@ -34,7 +34,6 @@ import (
"sort"
"strings"
"sync"
"sync/atomic"
template_text "text/template"
"time"
@ -51,9 +50,8 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/soheilhy/cmux"
"go.uber.org/atomic"
"golang.org/x/net/netutil"
"google.golang.org/grpc"
@ -64,6 +62,8 @@ import (
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
api_v2 "github.com/prometheus/prometheus/web/api/v2"
@ -202,7 +202,7 @@ type Handler struct {
mtx sync.RWMutex
now func() model.Time
ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct
@ -293,9 +293,8 @@ func New(logger log.Logger, o *Options) *Handler {
notifier: o.Notifier,
now: model.Now,
ready: 0,
}
h.ready.Store(0)
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
factoryAr := func(_ context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
@ -484,13 +483,12 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
// Ready sets Handler to be ready.
func (h *Handler) Ready() {
atomic.StoreUint32(&h.ready, 1)
h.ready.Store(1)
}
// Verifies whether the server is ready or not.
func (h *Handler) isReady() bool {
ready := atomic.LoadUint32(&h.ready)
return ready > 0
return h.ready.Load() > 0
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
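
The migration pattern in isolation, as a sketch: the wrapper type makes it impossible to read or write the flag non-atomically:

```go
package example

import "go.uber.org/atomic"

type handler struct {
	// The zero value already reads as 0 ("not ready"), so an explicit
	// Store at construction time is belt-and-braces.
	ready atomic.Uint32
}

func (h *handler) Ready()        { h.ready.Store(1) }
func (h *handler) isReady() bool { return h.ready.Load() > 0 }
```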