Mirror of https://github.com/prometheus/prometheus.git
Merge pull request #3766 from krasi-georgiev/renaming-consistency
rename structs for consistency
Commit b30ee3e69a
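In caller terms the rename is mechanical: the notifier handler type notifier.Notifier becomes notifier.Manager and its constructor New becomes NewManager, while the scrape manager keeps the ScrapeManager type and only renames its constructor NewScrapeManager to NewManager. A minimal sketch of a migrated notifier call site (the import paths, the go-kit logger, and the QueueCapacity value are illustrative assumptions, not part of this diff):

package main

import (
    "github.com/go-kit/kit/log"

    "github.com/prometheus/prometheus/notifier"
)

func main() {
    logger := log.NewNopLogger()

    // Before this commit: n := notifier.New(&notifier.Options{...}, logger)
    // returned a *notifier.Notifier. Afterwards the constructor is NewManager
    // and the handler type is *notifier.Manager; Options and behaviour are unchanged.
    n := notifier.NewManager(
        &notifier.Options{QueueCapacity: 100},
        log.With(logger, "component", "notifier"),
    )
    defer n.Stop()
}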
@@ -238,7 +238,7 @@ func main() {
 ctxWeb, cancelWeb = context.WithCancel(context.Background())
 ctxRule = context.Background()

-notifier = notifier.New(&cfg.notifier, log.With(logger, "component", "notifier"))
+notifier = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))

 ctxScrape, cancelScrape = context.WithCancel(context.Background())
 discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"))
@@ -246,7 +246,7 @@ func main() {
 ctxNotify, cancelNotify = context.WithCancel(context.Background())
 discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"))

-scrapeManager = retrieval.NewScrapeManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
+scrapeManager = retrieval.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
 queryEngine = promql.NewEngine(fanoutStorage, &cfg.queryEngine)
 ruleManager = rules.NewManager(&rules.ManagerOptions{
 Appendable: fanoutStorage,
@@ -654,7 +654,7 @@ func computeExternalURL(u, listenAddr string) (*url.URL, error) {

 // sendAlerts implements a the rules.NotifyFunc for a Notifier.
 // It filters any non-firing alerts from the input.
-func sendAlerts(n *notifier.Notifier, externalURL string) rules.NotifyFunc {
+func sendAlerts(n *notifier.Manager, externalURL string) rules.NotifyFunc {
 return func(ctx context.Context, expr string, alerts ...*rules.Alert) error {
 var res []*notifier.Alert

@@ -101,9 +101,9 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {
 return !a.EndsAt.After(ts)
 }

-// Notifier is responsible for dispatching alert notifications to an
+// Manager is responsible for dispatching alert notifications to an
 // alert manager service.
-type Notifier struct {
+type Manager struct {
 queue []*Alert
 opts *Options

@@ -206,8 +206,8 @@ func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanag
 return m
 }

-// New constructs a new Notifier.
-func New(o *Options, logger log.Logger) *Notifier {
+// NewManager is the manager constructor.
+func NewManager(o *Options, logger log.Logger) *Manager {
 ctx, cancel := context.WithCancel(context.Background())

 if o.Do == nil {
@@ -217,7 +217,7 @@ func New(o *Options, logger log.Logger) *Notifier {
 logger = log.NewNopLogger()
 }

-n := &Notifier{
+n := &Manager{
 queue: make([]*Alert, 0, o.QueueCapacity),
 ctx: ctx,
 cancel: cancel,
@@ -240,7 +240,7 @@ func New(o *Options, logger log.Logger) *Notifier {
 }

 // ApplyConfig updates the status state as the new config requires.
-func (n *Notifier) ApplyConfig(conf *config.Config) error {
+func (n *Manager) ApplyConfig(conf *config.Config) error {
 n.mtx.Lock()
 defer n.mtx.Unlock()

@@ -272,14 +272,14 @@ func (n *Notifier) ApplyConfig(conf *config.Config) error {

 const maxBatchSize = 64

-func (n *Notifier) queueLen() int {
+func (n *Manager) queueLen() int {
 n.mtx.RLock()
 defer n.mtx.RUnlock()

 return len(n.queue)
 }

-func (n *Notifier) nextBatch() []*Alert {
+func (n *Manager) nextBatch() []*Alert {
 n.mtx.Lock()
 defer n.mtx.Unlock()

@@ -297,7 +297,7 @@ func (n *Notifier) nextBatch() []*Alert {
 }

 // Run dispatches notifications continuously.
-func (n *Notifier) Run(tsets <-chan map[string][]*targetgroup.Group) {
+func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {

 for {
 select {
@@ -319,7 +319,7 @@ func (n *Notifier) Run(tsets <-chan map[string][]*targetgroup.Group) {
 }
 }

-func (n *Notifier) reload(tgs map[string][]*targetgroup.Group) {
+func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
 n.mtx.Lock()
 defer n.mtx.Unlock()

@@ -335,7 +335,7 @@ func (n *Notifier) reload(tgs map[string][]*targetgroup.Group) {

 // Send queues the given notification requests for processing.
 // Panics if called on a handler that is not running.
-func (n *Notifier) Send(alerts ...*Alert) {
+func (n *Manager) Send(alerts ...*Alert) {
 n.mtx.Lock()
 defer n.mtx.Unlock()

@@ -377,7 +377,7 @@ func (n *Notifier) Send(alerts ...*Alert) {
 n.setMore()
 }

-func (n *Notifier) relabelAlerts(alerts []*Alert) []*Alert {
+func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert {
 var relabeledAlerts []*Alert

 for _, alert := range alerts {
@@ -391,7 +391,7 @@ func (n *Notifier) relabelAlerts(alerts []*Alert) []*Alert {
 }

 // setMore signals that the alert queue has items.
-func (n *Notifier) setMore() {
+func (n *Manager) setMore() {
 // If we cannot send on the channel, it means the signal already exists
 // and has not been consumed yet.
 select {
@@ -401,7 +401,7 @@ func (n *Notifier) setMore() {
 }

 // Alertmanagers returns a slice of Alertmanager URLs.
-func (n *Notifier) Alertmanagers() []*url.URL {
+func (n *Manager) Alertmanagers() []*url.URL {
 n.mtx.RLock()
 amSets := n.alertmanagers
 n.mtx.RUnlock()
@@ -421,7 +421,7 @@ func (n *Notifier) Alertmanagers() []*url.URL {

 // sendAll sends the alerts to all configured Alertmanagers concurrently.
 // It returns true if the alerts could be sent successfully to at least one Alertmanager.
-func (n *Notifier) sendAll(alerts ...*Alert) bool {
+func (n *Manager) sendAll(alerts ...*Alert) bool {
 begin := time.Now()

 b, err := json.Marshal(alerts)
@@ -469,7 +469,7 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool {
 return numSuccess > 0
 }

-func (n *Notifier) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
+func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
 req, err := http.NewRequest("POST", url, bytes.NewReader(b))
 if err != nil {
 return err
@@ -489,7 +489,7 @@ func (n *Notifier) sendOne(ctx context.Context, c *http.Client, url string, b []
 }

 // Stop shuts down the notification handler.
-func (n *Notifier) Stop() {
+func (n *Manager) Stop() {
 level.Info(n.logger).Log("msg", "Stopping notification manager...")
 n.cancel()
 }
@@ -71,7 +71,7 @@ func TestPostPath(t *testing.T) {
 }

 func TestHandlerNextBatch(t *testing.T) {
-h := New(&Options{}, nil)
+h := NewManager(&Options{}, nil)

 for i := range make([]struct{}, 2*maxBatchSize+1) {
 h.queue = append(h.queue, &Alert{
@@ -168,7 +168,7 @@ func TestHandlerSendAll(t *testing.T) {
 defer server1.Close()
 defer server2.Close()

-h := New(&Options{}, nil)
+h := NewManager(&Options{}, nil)

 authClient, _ := httputil.NewClientFromConfig(config_util.HTTPClientConfig{
 BasicAuth: &config_util.BasicAuth{
@@ -233,7 +233,7 @@ func TestCustomDo(t *testing.T) {
 const testBody = "testbody"

 var received bool
-h := New(&Options{
+h := NewManager(&Options{
 Do: func(ctx old_ctx.Context, client *http.Client, req *http.Request) (*http.Response, error) {
 received = true
 body, err := ioutil.ReadAll(req.Body)
@@ -260,7 +260,7 @@ func TestCustomDo(t *testing.T) {
 }

 func TestExternalLabels(t *testing.T) {
-h := New(&Options{
+h := NewManager(&Options{
 QueueCapacity: 3 * maxBatchSize,
 ExternalLabels: model.LabelSet{"a": "b"},
 RelabelConfigs: []*config.RelabelConfig{
@@ -296,7 +296,7 @@ func TestExternalLabels(t *testing.T) {
 }

 func TestHandlerRelabel(t *testing.T) {
-h := New(&Options{
+h := NewManager(&Options{
 QueueCapacity: 3 * maxBatchSize,
 RelabelConfigs: []*config.RelabelConfig{
 {
@@ -356,7 +356,7 @@ func TestHandlerQueueing(t *testing.T) {
 }
 }))

-h := New(&Options{
+h := NewManager(&Options{
 QueueCapacity: 3 * maxBatchSize,
 },
 nil,
@@ -469,7 +469,7 @@ func TestReload(t *testing.T) {
 },
 }

-n := New(&Options{}, nil)
+n := NewManager(&Options{}, nil)

 cfg := &config.Config{}
 s := `
@@ -31,8 +31,8 @@ type Appendable interface {
 Appender() (storage.Appender, error)
 }

-// NewScrapeManager is the ScrapeManager constructor
-func NewScrapeManager(logger log.Logger, app Appendable) *ScrapeManager {
+// NewManager is the ScrapeManager constructor
+func NewManager(logger log.Logger, app Appendable) *ScrapeManager {

 return &ScrapeManager{
 append: app,
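For completeness, a matching caller sketch for the scrape-manager constructor rename (the import path github.com/prometheus/prometheus/retrieval is an assumption; the nil logger and nil Appendable mirror the test hunk that follows):

package main

import "github.com/prometheus/prometheus/retrieval"

func main() {
    // Before this commit: scrapeManager := retrieval.NewScrapeManager(nil, nil).
    // Afterwards the constructor is retrieval.NewManager; the returned value is
    // still a *retrieval.ScrapeManager.
    scrapeManager := retrieval.NewManager(nil, nil)
    _ = scrapeManager
}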
@@ -246,7 +246,7 @@ func TestManagerReloadNoChange(t *testing.T) {
 },
 }

-scrapeManager := NewScrapeManager(nil, nil)
+scrapeManager := NewManager(nil, nil)
 scrapeManager.scrapeConfigs[tsetName] = reloadCfg.ScrapeConfigs[0]
 // As reload never happens, new loop should never be called.
 newLoop := func(_ *Target, s scraper) loop {
@@ -77,7 +77,7 @@ type Handler struct {
 context context.Context
 tsdb func() *tsdb.DB
 storage storage.Storage
-notifier *notifier.Notifier
+notifier *notifier.Manager

 apiV1 *api_v1.API

@@ -127,7 +127,7 @@ type Options struct {
 QueryEngine *promql.Engine
 ScrapeManager *retrieval.ScrapeManager
 RuleManager *rules.Manager
-Notifier *notifier.Notifier
+Notifier *notifier.Manager
 Version *PrometheusVersion
 Flags map[string]string