Merge pull request #2423 from prometheus/multiple-remote-writers: Re-add multiple remote writers

This change restores support for more than one remote_write endpoint: the single RemoteWriteConfig field becomes a list, remote storage fans samples out to one queue manager per endpoint, and each queue applies its own write relabeling and external labels.

Commit 16bd5c8ebe
config/config.go

@@ -204,7 +204,7 @@ type Config struct {
 	RuleFiles     []string        `yaml:"rule_files,omitempty"`
 	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`

-	RemoteWriteConfig RemoteWriteConfig `yaml:"remote_write,omitempty"`
+	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`

 	// Catches all undefined fields and must be empty after parsing.
 	XXX map[string]interface{} `yaml:",inline"`
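The remote_write field now unmarshals as a YAML list rather than a single block. A minimal sketch of what that means for parsing, using trimmed-down stand-in types rather than the real config package (only the URL field is modeled, and it is kept as a plain string here):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed-down stand-ins for the real config types.
type RemoteWriteConfig struct {
	URL string `yaml:"url"`
}

type Config struct {
	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
}

func main() {
	raw := `
remote_write:
- url: http://remote1/push
- url: http://remote2/push
`
	var cfg Config
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	for i, rw := range cfg.RemoteWriteConfigs {
		fmt.Printf("queue %d -> %s\n", i, rw.URL)
	}
}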
config/config_test.go

@@ -26,6 +26,14 @@ import (
 	"gopkg.in/yaml.v2"
 )

+func mustParseURL(u string) *URL {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		panic(err)
+	}
+	return &URL{URL: parsed}
+}
+
 var expectedConf = &Config{
 	GlobalConfig: GlobalConfig{
 		ScrapeInterval: model.Duration(15 * time.Second),
@@ -44,17 +52,24 @@ var expectedConf = &Config{
 		"testdata/my/*.rules",
 	},

-	RemoteWriteConfig: RemoteWriteConfig{
-		RemoteTimeout: model.Duration(30 * time.Second),
-		WriteRelabelConfigs: []*RelabelConfig{
-			{
-				SourceLabels: model.LabelNames{"__name__"},
-				Separator:    ";",
-				Regex:        MustNewRegexp("expensive.*"),
-				Replacement:  "$1",
-				Action:       RelabelDrop,
+	RemoteWriteConfigs: []*RemoteWriteConfig{
+		{
+			URL:           mustParseURL("http://remote1/push"),
+			RemoteTimeout: model.Duration(30 * time.Second),
+			WriteRelabelConfigs: []*RelabelConfig{
+				{
+					SourceLabels: model.LabelNames{"__name__"},
+					Separator:    ";",
+					Regex:        MustNewRegexp("expensive.*"),
+					Replacement:  "$1",
+					Action:       RelabelDrop,
+				},
 			},
 		},
+		{
+			URL:           mustParseURL("http://remote2/push"),
+			RemoteTimeout: model.Duration(30 * time.Second),
+		},
 	},

 	ScrapeConfigs: []*ScrapeConfig{
config/testdata/conf.good.yml (vendored, 10 changed lines)

@@ -14,10 +14,12 @@ rule_files:
 - "my/*.rules"

 remote_write:
+- url: http://remote1/push
   write_relabel_configs:
   - source_labels: [__name__]
     regex: expensive.*
     action: drop
+- url: http://remote2/push

 scrape_configs:
 - job_name: prometheus
storage/remote/client.go

@@ -31,13 +31,14 @@ import (

 // Client allows sending batches of Prometheus samples to an HTTP endpoint.
 type Client struct {
+	index   int // Used to differentiate metrics.
 	url     config.URL
 	client  *http.Client
 	timeout time.Duration
 }

 // NewClient creates a new Client.
-func NewClient(conf config.RemoteWriteConfig) (*Client, error) {
+func NewClient(index int, conf *config.RemoteWriteConfig) (*Client, error) {
 	tlsConfig, err := httputil.NewTLSConfig(conf.TLSConfig)
 	if err != nil {
 		return nil, err
@@ -55,6 +56,7 @@ func NewClient(conf config.RemoteWriteConfig) (*Client, error) {
 	}

 	return &Client{
+		index:   index,
 		url:     *conf.URL,
 		client:  httputil.NewClient(rt),
 		timeout: time.Duration(conf.RemoteTimeout),
@@ -114,7 +116,7 @@ func (c *Client) Store(samples model.Samples) error {
 	return nil
 }

-// Name identifies the client as a generic client.
+// Name identifies the client.
 func (c Client) Name() string {
-	return "generic"
+	return fmt.Sprintf("%d:%s", c.index, c.url)
 }
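With several writers, the queue metrics need a distinct label value per remote, which is what the new Name() provides. A tiny standalone illustration of the format it produces (queueName is a hypothetical helper mirroring the Sprintf in the diff):

package main

import "fmt"

// queueName mirrors the format the patched Client.Name returns: the
// queue index plus the remote URL, so every remote gets a distinct
// label value on the queue metrics.
func queueName(index int, url string) string {
	return fmt.Sprintf("%d:%s", index, url)
}

func main() {
	fmt.Println(queueName(0, "http://remote1/push")) // 0:http://remote1/push
	fmt.Println(queueName(1, "http://remote2/push")) // 1:http://remote2/push
}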
storage/remote/queue_manager.go

@@ -20,6 +20,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/relabel"
 )

 // String constants for instrumentation.
@@ -27,6 +29,12 @@
 	namespace = "prometheus"
 	subsystem = "remote_storage"
 	queue     = "queue"
+
+	defaultShards            = 10
+	defaultMaxSamplesPerSend = 100
+	// The queue capacity is per shard.
+	defaultQueueCapacity     = 100 * 1024 / defaultShards
+	defaultBatchSendDeadline = 5 * time.Second
 )

 var (
@@ -105,35 +113,40 @@ type StorageClient interface {
 	Name() string
 }

-type StorageQueueManagerConfig struct {
+// QueueManagerConfig configures a storage queue.
+type QueueManagerConfig struct {
 	QueueCapacity     int           // Number of samples to buffer per shard before we start dropping them.
 	Shards            int           // Number of shards, i.e. amount of concurrency.
 	MaxSamplesPerSend int           // Maximum number of samples per send.
 	BatchSendDeadline time.Duration // Maximum time sample will wait in buffer.
+	ExternalLabels    model.LabelSet
+	RelabelConfigs    []*config.RelabelConfig
+	Client            StorageClient
 }

-var defaultConfig = StorageQueueManagerConfig{
-	QueueCapacity:     100 * 1024 / 10,
-	Shards:            10,
-	MaxSamplesPerSend: 100,
-	BatchSendDeadline: 5 * time.Second,
-}
-
-// StorageQueueManager manages a queue of samples to be sent to the Storage
+// QueueManager manages a queue of samples to be sent to the Storage
 // indicated by the provided StorageClient.
-type StorageQueueManager struct {
-	cfg       StorageQueueManagerConfig
-	tsdb      StorageClient
+type QueueManager struct {
+	cfg       QueueManagerConfig
 	shards    []chan *model.Sample
 	wg        sync.WaitGroup
 	done      chan struct{}
 	queueName string
 }

-// NewStorageQueueManager builds a new StorageQueueManager.
-func NewStorageQueueManager(tsdb StorageClient, cfg *StorageQueueManagerConfig) *StorageQueueManager {
-	if cfg == nil {
-		cfg = &defaultConfig
+// NewQueueManager builds a new QueueManager.
+func NewQueueManager(cfg QueueManagerConfig) *QueueManager {
+	if cfg.QueueCapacity == 0 {
+		cfg.QueueCapacity = defaultQueueCapacity
+	}
+	if cfg.Shards == 0 {
+		cfg.Shards = defaultShards
+	}
+	if cfg.MaxSamplesPerSend == 0 {
+		cfg.MaxSamplesPerSend = defaultMaxSamplesPerSend
+	}
+	if cfg.BatchSendDeadline == 0 {
+		cfg.BatchSendDeadline = defaultBatchSendDeadline
 	}

 	shards := make([]chan *model.Sample, cfg.Shards)
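The package-level defaultConfig is gone; NewQueueManager now fills any zero-valued field, so callers set only what they care about. A self-contained sketch of the same defaulting pattern, with simplified types outside the real package:

package main

import (
	"fmt"
	"time"
)

const (
	defaultShards            = 10
	defaultMaxSamplesPerSend = 100
	// The queue capacity is per shard.
	defaultQueueCapacity     = 100 * 1024 / defaultShards
	defaultBatchSendDeadline = 5 * time.Second
)

type QueueManagerConfig struct {
	QueueCapacity     int
	Shards            int
	MaxSamplesPerSend int
	BatchSendDeadline time.Duration
}

// applyDefaults fills every zero-valued field, mirroring the checks
// at the top of the new NewQueueManager. Taking the config by value
// means the caller's struct is never mutated.
func applyDefaults(cfg QueueManagerConfig) QueueManagerConfig {
	if cfg.QueueCapacity == 0 {
		cfg.QueueCapacity = defaultQueueCapacity
	}
	if cfg.Shards == 0 {
		cfg.Shards = defaultShards
	}
	if cfg.MaxSamplesPerSend == 0 {
		cfg.MaxSamplesPerSend = defaultMaxSamplesPerSend
	}
	if cfg.BatchSendDeadline == 0 {
		cfg.BatchSendDeadline = defaultBatchSendDeadline
	}
	return cfg
}

func main() {
	// Override only the shard count; everything else defaults.
	fmt.Printf("%+v\n", applyDefaults(QueueManagerConfig{Shards: 1}))
}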
@@ -141,12 +154,11 @@ func NewStorageQueueManager(tsdb StorageClient, cfg *StorageQueueManagerConfig)
 		shards[i] = make(chan *model.Sample, cfg.QueueCapacity)
 	}

-	t := &StorageQueueManager{
-		cfg:       *cfg,
-		tsdb:      tsdb,
+	t := &QueueManager{
+		cfg:       cfg,
 		shards:    shards,
 		done:      make(chan struct{}),
-		queueName: tsdb.Name(),
+		queueName: cfg.Client.Name(),
 	}

 	queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.QueueCapacity))
@@ -157,12 +169,29 @@
 // Append queues a sample to be sent to the remote storage. It drops the
 // sample on the floor if the queue is full.
 // Always returns nil.
-func (t *StorageQueueManager) Append(s *model.Sample) error {
-	fp := s.Metric.FastFingerprint()
+func (t *QueueManager) Append(s *model.Sample) error {
+	var snew model.Sample
+	snew = *s
+	snew.Metric = s.Metric.Clone()
+
+	for ln, lv := range t.cfg.ExternalLabels {
+		if _, ok := s.Metric[ln]; !ok {
+			snew.Metric[ln] = lv
+		}
+	}
+
+	snew.Metric = model.Metric(
+		relabel.Process(model.LabelSet(snew.Metric), t.cfg.RelabelConfigs...))
+
+	if snew.Metric == nil {
+		return nil
+	}
+
+	fp := snew.Metric.FastFingerprint()
 	shard := uint64(fp) % uint64(t.cfg.Shards)

 	select {
-	case t.shards[shard] <- s:
+	case t.shards[shard] <- &snew:
 		queueLength.WithLabelValues(t.queueName).Inc()
 	default:
 		droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
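External labels and write relabeling now run per queue inside Append, in this order: clone, fill external labels only where the scraped metric lacks them, relabel, and drop on a nil result before any sharding happens. A standalone sketch of that order of operations, with plain string maps instead of model.Metric and a prefix-match stub standing in for relabel.Process with the action: drop rule from conf.good.yml:

package main

import (
	"fmt"
	"strings"
)

type labels = map[string]string

// dropExpensive stands in for relabel.Process with the `action: drop`
// rule: metrics whose __name__ matches expensive.* are dropped.
func dropExpensive(m labels) labels {
	if strings.HasPrefix(m["__name__"], "expensive") {
		return nil
	}
	return m
}

// appendSample mirrors the order of operations in the new
// QueueManager.Append: clone, fill external labels, relabel.
func appendSample(m, external labels) labels {
	// Clone first so the caller's sample is never mutated.
	snew := labels{}
	for k, v := range m {
		snew[k] = v
	}
	// External labels only fill gaps; scraped labels win.
	for ln, lv := range external {
		if _, ok := m[ln]; !ok {
			snew[ln] = lv
		}
	}
	return dropExpensive(snew)
}

func main() {
	ext := labels{"cluster": "eu-west"}
	fmt.Println(appendSample(labels{"__name__": "up"}, ext))
	if appendSample(labels{"__name__": "expensive_metric"}, ext) == nil {
		fmt.Println("expensive_metric dropped before sharding")
	}
}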
@@ -174,13 +203,13 @@ func (t *StorageQueueManager) Append(s *model.Sample) error {
 // NeedsThrottling implements storage.SampleAppender. It will always return
 // false as a remote storage drops samples on the floor if backlogging instead
 // of asking for throttling.
-func (*StorageQueueManager) NeedsThrottling() bool {
+func (*QueueManager) NeedsThrottling() bool {
 	return false
 }

 // Start the queue manager sending samples to the remote storage.
 // Does not block.
-func (t *StorageQueueManager) Start() {
+func (t *QueueManager) Start() {
 	for i := 0; i < t.cfg.Shards; i++ {
 		go t.runShard(i)
 	}
@@ -188,7 +217,7 @@ func (t *StorageQueueManager) Start() {

 // Stop stops sending samples to the remote storage and waits for pending
 // sends to complete.
-func (t *StorageQueueManager) Stop() {
+func (t *QueueManager) Stop() {
 	log.Infof("Stopping remote storage...")
 	for _, shard := range t.shards {
 		close(shard)
@@ -197,7 +226,7 @@ func (t *StorageQueueManager) Stop() {
 	log.Info("Remote storage stopped.")
 }

-func (t *StorageQueueManager) runShard(i int) {
+func (t *QueueManager) runShard(i int) {
 	defer t.wg.Done()
 	shard := t.shards[i]

@@ -234,12 +263,12 @@ func (t *StorageQueueManager) runShard(i int) {
 		}
 	}

-func (t *StorageQueueManager) sendSamples(s model.Samples) {
+func (t *QueueManager) sendSamples(s model.Samples) {
 	// Samples are sent to the remote storage on a best-effort basis. If a
 	// sample isn't sent correctly the first time, it's simply dropped on the
 	// floor.
 	begin := time.Now()
-	err := t.tsdb.Store(s)
+	err := t.cfg.Client.Store(s)
 	duration := time.Since(begin).Seconds()

 	if err != nil {
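sendSamples now reaches the client through the config instead of the removed tsdb field; the surrounding best-effort contract is unchanged. A sketch of that contract with a stub client (all names here are hypothetical): time the Store call, record the outcome, never retry.

package main

import (
	"errors"
	"fmt"
	"time"
)

// stubClient stands in for the remote write client.
type stubClient struct{ fail bool }

func (c stubClient) Store(samples []float64) error {
	if c.fail {
		return errors.New("remote unavailable")
	}
	return nil
}

// sendSamples mirrors the best-effort contract: time the send,
// record the outcome, and never retry; a failed batch is dropped.
func sendSamples(c stubClient, samples []float64) {
	begin := time.Now()
	err := c.Store(samples)
	duration := time.Since(begin).Seconds()
	if err != nil {
		fmt.Printf("dropped %d samples after %.6fs: %v\n", len(samples), duration, err)
		return
	}
	fmt.Printf("sent %d samples in %.6fs\n", len(samples), duration)
}

func main() {
	sendSamples(stubClient{}, []float64{1, 2, 3})
	sendSamples(stubClient{fail: true}, []float64{4, 5})
}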
storage/remote/queue_manager_test.go

@@ -81,9 +81,7 @@ func (c *TestStorageClient) Name() string {
 func TestSampleDelivery(t *testing.T) {
 	// Let's create an even number of send batches so we don't run into the
 	// batch timeout case.
-	cfg := defaultConfig
-	n := cfg.QueueCapacity * 2
-	cfg.Shards = 1
+	n := defaultQueueCapacity * 2

 	samples := make(model.Samples, 0, n)
 	for i := 0; i < n; i++ {
@@ -98,7 +96,11 @@ func TestSampleDelivery(t *testing.T) {

 	c := NewTestStorageClient()
 	c.expectSamples(samples[:len(samples)/2])
-	m := NewStorageQueueManager(c, &cfg)
+	m := NewQueueManager(QueueManagerConfig{
+		Client: c,
+		Shards: 1,
+	})

 	// These should be received by the client.
 	for _, s := range samples[:len(samples)/2] {
@@ -115,11 +117,8 @@ func TestSampleDelivery(t *testing.T) {
 }

 func TestSampleDeliveryOrder(t *testing.T) {
-	cfg := defaultConfig
 	ts := 10
-	n := cfg.MaxSamplesPerSend * ts
-	// Ensure we don't drop samples in this test.
-	cfg.QueueCapacity = n
+	n := defaultMaxSamplesPerSend * ts

 	samples := make(model.Samples, 0, n)
 	for i := 0; i < n; i++ {
@@ -135,7 +134,11 @@ func TestSampleDeliveryOrder(t *testing.T) {

 	c := NewTestStorageClient()
 	c.expectSamples(samples)
-	m := NewStorageQueueManager(c, &cfg)
+	m := NewQueueManager(QueueManagerConfig{
+		Client: c,
+		// Ensure we don't drop samples in this test.
+		QueueCapacity: n,
+	})

 	// These should be received by the client.
 	for _, s := range samples {
@@ -181,7 +184,7 @@ func (c *TestBlockingStorageClient) Name() string {
 	return "testblockingstorageclient"
 }

-func (t *StorageQueueManager) queueLen() int {
+func (t *QueueManager) queueLen() int {
 	queueLength := 0
 	for _, shard := range t.shards {
 		queueLength += len(shard)
@@ -194,9 +197,7 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
 	// `MaxSamplesPerSend*Shards` samples should be consumed by the
 	// per-shard goroutines, and then another `MaxSamplesPerSend`
 	// should be left on the queue.
-	cfg := defaultConfig
-	n := cfg.MaxSamplesPerSend*cfg.Shards + cfg.MaxSamplesPerSend
-	cfg.QueueCapacity = n
+	n := defaultMaxSamplesPerSend*defaultShards + defaultMaxSamplesPerSend

 	samples := make(model.Samples, 0, n)
 	for i := 0; i < n; i++ {
@@ -210,7 +211,10 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
 	}

 	c := NewTestBlockedStorageClient()
-	m := NewStorageQueueManager(c, &cfg)
+	m := NewQueueManager(QueueManagerConfig{
+		Client:        c,
+		QueueCapacity: n,
+	})

 	m.Start()

@@ -239,14 +243,14 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
 		time.Sleep(10 * time.Millisecond)
 	}

-	if m.queueLen() != cfg.MaxSamplesPerSend {
-		t.Fatalf("Failed to drain StorageQueueManager queue, %d elements left",
+	if m.queueLen() != defaultMaxSamplesPerSend {
+		t.Fatalf("Failed to drain QueueManager queue, %d elements left",
 			m.queueLen(),
 		)
 	}

 	numCalls := c.NumCalls()
-	if numCalls != uint64(cfg.Shards) {
-		t.Errorf("Saw %d concurrent sends, expected %d", numCalls, cfg.Shards)
+	if numCalls != uint64(defaultShards) {
+		t.Errorf("Saw %d concurrent sends, expected %d", numCalls, defaultShards)
 	}
 }
storage/remote (remote write Storage)

@@ -19,16 +19,12 @@ import (
 	"github.com/prometheus/common/model"

 	"github.com/prometheus/prometheus/config"
-	"github.com/prometheus/prometheus/relabel"
 )

 // Storage allows queueing samples for remote writes.
 type Storage struct {
 	mtx    sync.RWMutex
-	externalLabels model.LabelSet
-	conf           config.RemoteWriteConfig
-
-	queue *StorageQueueManager
+	queues []*QueueManager
 }

 // ApplyConfig updates the state as the new config requires.
@@ -36,34 +32,36 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()

+	newQueues := []*QueueManager{}
 	// TODO: we should only stop & recreate queues which have changes,
 	// as this can be quite disruptive.
-	var newQueue *StorageQueueManager
-
-	if conf.RemoteWriteConfig.URL != nil {
-		c, err := NewClient(conf.RemoteWriteConfig)
+	for i, rwConf := range conf.RemoteWriteConfigs {
+		c, err := NewClient(i, rwConf)
 		if err != nil {
 			return err
 		}
-		newQueue = NewStorageQueueManager(c, nil)
+		newQueues = append(newQueues, NewQueueManager(QueueManagerConfig{
+			Client:         c,
+			ExternalLabels: conf.GlobalConfig.ExternalLabels,
+			RelabelConfigs: rwConf.WriteRelabelConfigs,
+		}))
 	}

-	if s.queue != nil {
-		s.queue.Stop()
+	for _, q := range s.queues {
+		q.Stop()
 	}
-	s.queue = newQueue
-	s.conf = conf.RemoteWriteConfig
-	s.externalLabels = conf.GlobalConfig.ExternalLabels
-	if s.queue != nil {
-		s.queue.Start()
+	s.queues = newQueues
+	for _, q := range s.queues {
+		q.Start()
 	}
 	return nil
 }

 // Stop the background processing of the storage queues.
 func (s *Storage) Stop() {
-	if s.queue != nil {
-		s.queue.Stop()
+	for _, q := range s.queues {
+		q.Stop()
 	}
 }
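On reload, ApplyConfig rebuilds every queue, as the TODO notes. The ordering matters: all new queues are constructed first, so a bad endpoint config returns an error before any running queue is stopped. A standalone sketch of that stop-all, swap, start-all shape under the write lock (Queue is a stand-in for *QueueManager):

package main

import (
	"fmt"
	"sync"
)

// Queue is a stand-in for *QueueManager.
type Queue struct{ name string }

func (q *Queue) Start() { fmt.Println("start", q.name) }
func (q *Queue) Stop()  { fmt.Println("stop", q.name) }

type Storage struct {
	mtx    sync.RWMutex
	queues []*Queue
}

// ApplyConfig builds the new queue set before touching the old one;
// only then does it stop the old queues and start the new set.
func (s *Storage) ApplyConfig(urls []string) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	newQueues := []*Queue{}
	for i, u := range urls {
		newQueues = append(newQueues, &Queue{name: fmt.Sprintf("%d:%s", i, u)})
	}

	for _, q := range s.queues {
		q.Stop()
	}
	s.queues = newQueues
	for _, q := range s.queues {
		q.Start()
	}
	return nil
}

func main() {
	s := &Storage{}
	s.ApplyConfig([]string{"http://remote1/push"})
	s.ApplyConfig([]string{"http://remote1/push", "http://remote2/push"})
}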
@@ -72,26 +70,9 @@ func (s *Storage) Append(smpl *model.Sample) error {
 	s.mtx.RLock()
 	defer s.mtx.RUnlock()

-	if s.queue == nil {
-		return nil
-	}
-
-	var snew model.Sample
-	snew = *smpl
-	snew.Metric = smpl.Metric.Clone()
-
-	for ln, lv := range s.externalLabels {
-		if _, ok := smpl.Metric[ln]; !ok {
-			snew.Metric[ln] = lv
-		}
-	}
-	snew.Metric = model.Metric(
-		relabel.Process(model.LabelSet(snew.Metric), s.conf.WriteRelabelConfigs...))
-
-	if snew.Metric == nil {
-		return nil
-	}
-	s.queue.Append(&snew)
+	for _, q := range s.queues {
+		q.Append(smpl)
+	}
 	return nil
 }
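Storage.Append is reduced to a fan-out: cloning and relabeling moved into QueueManager.Append, so the storage layer hands the same sample pointer to every queue and each queue makes its own copy. A sketch of the fan-out shape with a stub interface (hypothetical names):

package main

import "fmt"

// Appender is a stub for the per-queue Append entry point.
type Appender interface {
	Append(sample string) error
}

type printQueue struct{ name string }

func (q printQueue) Append(sample string) error {
	fmt.Printf("%s <- %s\n", q.name, sample)
	return nil
}

// appendAll mirrors the new Storage.Append: every configured queue
// sees every sample, and each queue relabels its own copy later.
func appendAll(queues []Appender, sample string) error {
	for _, q := range queues {
		q.Append(sample)
	}
	return nil
}

func main() {
	queues := []Appender{
		printQueue{name: "0:http://remote1/push"},
		printQueue{name: "1:http://remote2/push"},
	}
	appendAll(queues, `up{job="prometheus"} 1`)
}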
vendor/github.com/prometheus/common/model/value.go (generated, vendored, 5 changed lines)

@@ -129,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
 	if !s.Timestamp.Equal(o.Timestamp) {
 		return false
 	}
-	if s.Value.Equal(o.Value) {
-		return false
-	}

-	return true
+	return s.Value.Equal(o.Value)
 }

 func (s Sample) String() string {
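The vendored model bump pulls in a fix for Sample.Equal, whose value check was inverted: equal values returned false. A compressed illustration of old versus new behavior, with plain float64 in place of model.SampleValue (whose real Equal also treats two NaNs as equal):

package main

import "fmt"

// buggyEqual reproduces the removed logic: the condition is inverted,
// so equal values reported "not equal" and unequal values "equal".
func buggyEqual(a, b float64) bool {
	if a == b {
		return false
	}
	return true
}

// fixedEqual matches the updated vendored code.
func fixedEqual(a, b float64) bool {
	return a == b
}

func main() {
	fmt.Println(buggyEqual(1, 1), fixedEqual(1, 1)) // false true
}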
vendor/vendor.json (vendored, 4 changed lines)

@@ -560,8 +560,8 @@
 	{
 		"checksumSHA1": "vopCLXHzYm+3l5fPKOf4/fQwrCM=",
 		"path": "github.com/prometheus/common/model",
-		"revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
-		"revisionTime": "2017-01-08T23:12:12Z"
+		"revision": "3007b6072c17c8d985734e6e19b1dea9174e13d3",
+		"revisionTime": "2017-02-19T00:35:58+01:00"
 	},
 	{
 		"checksumSHA1": "ZbbESWBHHcPUJ/A5yrzKhTHuPc8=",