Mirror of https://github.com/prometheus/prometheus.git
Change time.Duration to model.Duration for standardization (#4479)
Signed-off-by: Daisy T <daisyts@gmx.com>
commit 7d01ead689
parent 3581377e5d
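
For context on the change: model.Duration (from github.com/prometheus/common/model) is an integer duration type that marshals to and from YAML as a human-readable string such as "10s" or "5m", which is why Prometheus configuration fields standardize on it instead of time.Duration. The sketch below illustrates the difference; exampleConfig is a stand-in struct for illustration, not a real Prometheus type.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	yaml "gopkg.in/yaml.v2"
)

// exampleConfig stands in for a config struct that follows the pattern
// introduced by this commit.
type exampleConfig struct {
	Timeout model.Duration `yaml:"timeout,omitempty"`
}

func main() {
	cfg := exampleConfig{Timeout: model.Duration(10 * time.Second)}

	// A model.Duration field marshals as "10s"; a plain time.Duration field
	// would marshal as raw nanoseconds (10000000000).
	out, _ := yaml.Marshal(cfg)
	fmt.Print(string(out)) // timeout: 10s

	// It also parses Prometheus-style duration strings on the way in.
	var parsed exampleConfig
	if err := yaml.Unmarshal([]byte("timeout: 5m"), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(parsed.Timeout)) // 5m0s
}
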
@@ -89,7 +89,7 @@ var (
 	// DefaultAlertmanagerConfig is the default alertmanager configuration.
 	DefaultAlertmanagerConfig = AlertmanagerConfig{
 		Scheme: "http",
-		Timeout: 10 * time.Second,
+		Timeout: model.Duration(10 * time.Second),
 	}
 
 	// DefaultRelabelConfig is the default Relabel configuration.

@@ -116,12 +116,12 @@ var (
 		// By default, buffer 100 batches, which at 100ms per batch is 10s. At
 		// 1000 shards, this will buffer 10M samples total.
 		Capacity: 100 * 100,
-		BatchSendDeadline: 5 * time.Second,
+		BatchSendDeadline: model.Duration(5 * time.Second),
 
 		// Max number of times to retry a batch on recoverable errors.
 		MaxRetries: 3,
-		MinBackoff: 30 * time.Millisecond,
-		MaxBackoff: 100 * time.Millisecond,
+		MinBackoff: model.Duration(30 * time.Millisecond),
+		MaxBackoff: model.Duration(100 * time.Millisecond),
 	}
 
 	// DefaultRemoteReadConfig is the default remote read configuration.

@@ -408,7 +408,7 @@ type AlertmanagerConfig struct {
 	// Path prefix to add in front of the push endpoint path.
 	PathPrefix string `yaml:"path_prefix,omitempty"`
 	// The timeout used when sending alerts.
-	Timeout time.Duration `yaml:"timeout,omitempty"`
+	Timeout model.Duration `yaml:"timeout,omitempty"`
 
 	// List of Alertmanager relabel configurations.
 	RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`

@@ -652,14 +652,14 @@ type QueueConfig struct {
 	MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
 
 	// Maximum time sample will wait in buffer.
-	BatchSendDeadline time.Duration `yaml:"batch_send_deadline,omitempty"`
+	BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"`
 
 	// Max number of times to retry a batch on recoverable errors.
 	MaxRetries int `yaml:"max_retries,omitempty"`
 
 	// On recoverable errors, backoff exponentially.
-	MinBackoff time.Duration `yaml:"min_backoff,omitempty"`
-	MaxBackoff time.Duration `yaml:"max_backoff,omitempty"`
+	MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
+	MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
 }
 
 // RemoteReadConfig is the configuration for reading from remote storage.

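The struct-field and default-value hunks above change only the declared types; the remaining hunks adjust call sites, because standard-library APIs such as context.WithTimeout, time.NewTimer, timer.Reset and time.Sleep still take a time.Duration. A minimal sketch of that conversion pattern follows; withConfiguredTimeout is an illustrative helper, not code from this commit.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// withConfiguredTimeout derives a context deadline from a model.Duration
// config field. model.Duration is declared as `type Duration time.Duration`,
// so the conversion is a compile-time type change with no runtime cost.
func withConfiguredTimeout(parent context.Context, timeout model.Duration) (context.Context, context.CancelFunc) {
	return context.WithTimeout(parent, time.Duration(timeout))
}

func main() {
	ctx, cancel := withConfiguredTimeout(context.Background(), model.Duration(10*time.Second))
	defer cancel()

	deadline, _ := ctx.Deadline()
	fmt.Println("deadline in", time.Until(deadline).Round(time.Second)) // deadline in 10s
}
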
@@ -542,7 +542,7 @@ var expectedConf = &Config{
 		AlertmanagerConfigs: []*AlertmanagerConfig{
 			{
 				Scheme: "https",
-				Timeout: 10 * time.Second,
+				Timeout: model.Duration(10 * time.Second),
 				ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{
 					StaticConfigs: []*targetgroup.Group{
 						{

@@ -463,7 +463,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
 		for _, am := range ams.ams {
 			wg.Add(1)
 
-			ctx, cancel := context.WithTimeout(n.ctx, ams.cfg.Timeout)
+			ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
 			defer cancel()
 
 			go func(ams *alertmanagerSet, am alertmanager) {

@@ -170,7 +170,7 @@ func TestHandlerSendAll(t *testing.T) {
 			},
 		},
 		cfg: &config.AlertmanagerConfig{
-			Timeout: time.Second,
+			Timeout: model.Duration(time.Second),
 		},
 		client: authClient,
 	}

@@ -182,7 +182,7 @@ func TestHandlerSendAll(t *testing.T) {
 			},
 		},
 		cfg: &config.AlertmanagerConfig{
-			Timeout: time.Second,
+			Timeout: model.Duration(time.Second),
 		},
 	}
 

@@ -337,7 +337,7 @@ func TestHandlerQueueing(t *testing.T) {
 			},
 		},
 		cfg: &config.AlertmanagerConfig{
-			Timeout: time.Second,
+			Timeout: model.Duration(time.Second),
 		},
 	}
 

@@ -456,7 +456,7 @@ func (s *shards) runShard(i int) {
 	// anyways.
 	pendingSamples := model.Samples{}
 
-	timer := time.NewTimer(s.qm.cfg.BatchSendDeadline)
+	timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
 	stop := func() {
 		if !timer.Stop() {
 			select {

@@ -490,7 +490,7 @@ func (s *shards) runShard(i int) {
 				pendingSamples = pendingSamples[s.qm.cfg.MaxSamplesPerSend:]
 
 				stop()
-				timer.Reset(s.qm.cfg.BatchSendDeadline)
+				timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
 			}
 
 		case <-timer.C:

@@ -498,7 +498,7 @@ func (s *shards) runShard(i int) {
 				s.sendSamples(pendingSamples)
 				pendingSamples = pendingSamples[:0]
 			}
			timer.Reset(s.qm.cfg.BatchSendDeadline)
+			timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
 		}
 	}
 }

@@ -532,7 +532,7 @@ func (s *shards) sendSamplesWithBackoff(samples model.Samples) {
 		if _, ok := err.(recoverableError); !ok {
 			break
 		}
-		time.Sleep(backoff)
+		time.Sleep(time.Duration(backoff))
 		backoff = backoff * 2
 		if backoff > s.qm.cfg.MaxBackoff {
 			backoff = s.qm.cfg.MaxBackoff

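Because model.Duration is an integer nanosecond count, the backoff arithmetic above (doubling and comparing against MaxBackoff) needs no changes; only the time.Sleep call converts back. A rough sketch of the same pattern, with queueCfg and send as stand-ins rather than the real remote-write types:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// queueCfg mirrors the QueueConfig fields touched in this commit.
type queueCfg struct {
	MaxRetries int
	MinBackoff model.Duration
	MaxBackoff model.Duration
}

// sendWithBackoff retries send with exponential backoff between attempts.
func sendWithBackoff(cfg queueCfg, send func() error) error {
	backoff := cfg.MinBackoff
	var err error
	for retries := cfg.MaxRetries; retries > 0; retries-- {
		if err = send(); err == nil {
			return nil
		}
		time.Sleep(time.Duration(backoff)) // standard library still wants time.Duration
		backoff = backoff * 2              // integer arithmetic, no conversion needed
		if backoff > cfg.MaxBackoff {
			backoff = cfg.MaxBackoff
		}
	}
	return err
}

func main() {
	cfg := queueCfg{
		MaxRetries: 3,
		MinBackoff: model.Duration(30 * time.Millisecond),
		MaxBackoff: model.Duration(100 * time.Millisecond),
	}
	err := sendWithBackoff(cfg, func() error { return errors.New("remote endpoint unavailable") })
	fmt.Println("gave up:", err) // gave up: remote endpoint unavailable
}
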
@@ -147,7 +147,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
 
 	cfg := config.DefaultQueueConfig
 	cfg.MaxShards = 1
-	cfg.BatchSendDeadline = 100 * time.Millisecond
+	cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
 	m := NewQueueManager(nil, cfg, nil, nil, c, defaultFlushDeadline)
 	m.Start()
 	defer m.Stop()