Update package storage/remote for new labels.Labels type
`QueueManager.externalLabels` becomes a slice rather than a `Labels` so we can index into it when doing the merge operation.

Note we avoid calling `Labels.Len()` in `labelProtosToLabels()`. It isn't necessary - `append()` will enlarge the buffer and we're expecting to re-use it many times.

Also, we now validate protobuf input before converting to Labels. This way we can detect errors first, and we don't place unnecessary requirements on the Labels structure.

Re-do seriesFilter using labels.Builder (albeit N^2).

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
This commit is contained in:
parent 9bc6d7a7db
commit abd9909595
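As context for the diff below: with the new opaque labels.Labels type, callers can no longer index into a Labels value directly, which is why the external labels are copied into a plain []labels.Label for the merge. A minimal sketch of the two access patterns, assuming the model/labels import path; illustrative only, not part of the commit:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lbls := labels.FromStrings("__name__", "up", "job", "node")

	// The new type is traversed with Range rather than by index.
	lbls.Range(func(l labels.Label) {
		fmt.Printf("%s=%s\n", l.Name, l.Value)
	})

	// Where positional access is needed (as in the external-label merge),
	// the labels are first copied into an ordinary slice.
	flat := make([]labels.Label, 0, lbls.Len())
	lbls.Range(func(l labels.Label) {
		flat = append(flat, l)
	})
	fmt.Println(flat[0].Name) // "__name__"
}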
@@ -153,10 +153,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
 func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet {
 	series := make([]storage.Series, 0, len(res.Timeseries))
 	for _, ts := range res.Timeseries {
-		lbls := labelProtosToLabels(ts.Labels)
-		if err := validateLabelsAndMetricName(lbls); err != nil {
+		if err := validateLabelsAndMetricName(ts.Labels); err != nil {
 			return errSeriesSet{err: err}
 		}
+		lbls := labelProtosToLabels(ts.Labels)
 		series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples})
 	}
 
@@ -346,7 +346,7 @@ type concreteSeries struct {
 }
 
 func (c *concreteSeries) Labels() labels.Labels {
-	return labels.New(c.labels...)
+	return c.labels.Copy()
 }
 
 func (c *concreteSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
@@ -441,7 +441,7 @@ func (c *concreteSeriesIterator) Err() error {
 
 // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
 // also making sure that there are no labels with duplicate names
-func validateLabelsAndMetricName(ls labels.Labels) error {
+func validateLabelsAndMetricName(ls []prompb.Label) error {
 	for i, l := range ls {
 		if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
 			return fmt.Errorf("invalid metric name: %v", l.Value)
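The reordering in FromQueryResult, together with the signature change above from labels.Labels to []prompb.Label, means bad remote-read input is rejected before any Labels value is built. A standalone sketch of that check order, not the package's internal code; prompb.Label and model.IsValidMetricName are the types and helpers that appear in the diff, and the check shown covers only the metric-name case:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// Raw protobuf labels as received from a remote-read response.
	raw := []prompb.Label{{Name: "__name__", Value: "up{"}} // "up{" is not a valid metric name

	// Validate the raw input first, mirroring the new order: only convert to
	// labels.Labels once the protobuf data has passed the checks.
	for _, l := range raw {
		// "__name__" corresponds to labels.MetricName in the real code.
		if l.Name == "__name__" && !model.IsValidMetricName(model.LabelValue(l.Value)) {
			fmt.Printf("invalid metric name: %v\n", l.Value)
			return
		}
	}
	fmt.Println("labels OK; safe to convert")
}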
@@ -581,30 +581,24 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 }
 
 func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
-	result := make(labels.Labels, 0, len(labelPairs))
+	b := labels.ScratchBuilder{}
 	for _, l := range labelPairs {
-		result = append(result, labels.Label{
-			Name:  l.Name,
-			Value: l.Value,
-		})
+		b.Add(l.Name, l.Value)
 	}
-	sort.Sort(result)
-	return result
+	b.Sort()
+	return b.Labels()
 }
 
 // labelsToLabelsProto transforms labels into prompb labels. The buffer slice
 // will be used to avoid allocations if it is big enough to store the labels.
-func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label {
+func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
 	result := buf[:0]
-	if cap(buf) < len(labels) {
-		result = make([]prompb.Label, 0, len(labels))
-	}
-	for _, l := range labels {
+	lbls.Range(func(l labels.Label) {
 		result = append(result, prompb.Label{
 			Name:  l.Name,
 			Value: l.Value,
 		})
-	}
+	})
 	return result
 }
 
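The commit message notes that labelProtosToLabels deliberately does not pre-size the builder with Labels.Len(): append() grows the ScratchBuilder's buffer, and the builder is expected to be reused many times. A small sketch of the same Add/Sort/Labels pattern, using only calls that appear in the diff; the label values are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Zero-value ScratchBuilder: no pre-sizing needed, append() grows it.
	b := labels.ScratchBuilder{}
	b.Add("job", "node")
	b.Add("__name__", "up")

	// Input order is not guaranteed to be sorted, so Sort before Labels,
	// exactly as labelProtosToLabels now does.
	b.Sort()
	fmt.Println(b.Labels()) // {__name__="up", job="node"}
}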
@@ -396,7 +396,7 @@ type QueueManager struct {
 	flushDeadline        time.Duration
 	cfg                  config.QueueConfig
 	mcfg                 config.MetadataConfig
-	externalLabels       labels.Labels
+	externalLabels       []labels.Label
 	relabelConfigs       []*relabel.Config
 	sendExemplars        bool
 	sendNativeHistograms bool
@@ -454,13 +454,19 @@ func NewQueueManager(
 		logger = log.NewNopLogger()
 	}
 
+	// Copy externalLabels into slice which we need for processExternalLabels.
+	extLabelsSlice := make([]labels.Label, 0, externalLabels.Len())
+	externalLabels.Range(func(l labels.Label) {
+		extLabelsSlice = append(extLabelsSlice, l)
+	})
+
 	logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
 	t := &QueueManager{
 		logger:               logger,
 		flushDeadline:        flushDeadline,
 		cfg:                  cfg,
 		mcfg:                 mCfg,
-		externalLabels:       externalLabels,
+		externalLabels:       extLabelsSlice,
 		relabelConfigs:       relabelConfigs,
 		storeClient:          client,
 		sendExemplars:        enableExemplarRemoteWrite,
@@ -769,8 +775,8 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
 		t.seriesSegmentIndexes[s.Ref] = index
 
 		ls := processExternalLabels(s.Labels, t.externalLabels)
-		lbls := relabel.Process(ls, t.relabelConfigs...)
-		if len(lbls) == 0 {
+		lbls, keep := relabel.Process(ls, t.relabelConfigs...)
+		if !keep || lbls.IsEmpty() {
 			t.droppedSeries[s.Ref] = struct{}{}
 			continue
 		}
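relabel.Process now reports explicitly whether the series survives relabelling, so StoreSeries checks !keep || lbls.IsEmpty() instead of len(lbls) == 0. A hedged sketch of the two-value form; the drop rule below is purely illustrative and the config fields shown are an assumption about a minimal valid relabel.Config:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// An illustrative rule that drops any series named "up".
	cfgs := []*relabel.Config{{
		SourceLabels: model.LabelNames{"__name__"},
		Regex:        relabel.MustNewRegexp("up"),
		Action:       relabel.Drop,
	}}

	lbls, keep := relabel.Process(labels.FromStrings("__name__", "up"), cfgs...)
	if !keep || lbls.IsEmpty() {
		fmt.Println("series dropped") // mirrors the check in StoreSeries
	}
}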
@@ -831,44 +837,33 @@ func (t *QueueManager) client() WriteClient {
 }
 
 func (t *QueueManager) internLabels(lbls labels.Labels) {
-	for i, l := range lbls {
-		lbls[i].Name = t.interner.intern(l.Name)
-		lbls[i].Value = t.interner.intern(l.Value)
-	}
+	lbls.InternStrings(t.interner.intern)
 }
 
 func (t *QueueManager) releaseLabels(ls labels.Labels) {
-	for _, l := range ls {
-		t.interner.release(l.Name)
-		t.interner.release(l.Value)
-	}
+	ls.ReleaseStrings(t.interner.release)
 }
 
 // processExternalLabels merges externalLabels into ls. If ls contains
 // a label in externalLabels, the value in ls wins.
-func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels {
-	i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels))
-	for i < len(ls) && j < len(externalLabels) {
-		if ls[i].Name < externalLabels[j].Name {
-			result = append(result, labels.Label{
-				Name:  ls[i].Name,
-				Value: ls[i].Value,
-			})
-			i++
-		} else if ls[i].Name > externalLabels[j].Name {
-			result = append(result, externalLabels[j])
-			j++
-		} else {
-			result = append(result, labels.Label{
-				Name:  ls[i].Name,
-				Value: ls[i].Value,
-			})
-			i++
+func processExternalLabels(ls labels.Labels, externalLabels []labels.Label) labels.Labels {
+	b := labels.NewScratchBuilder(ls.Len() + len(externalLabels))
+	j := 0
+	ls.Range(func(l labels.Label) {
+		for j < len(externalLabels) && l.Name > externalLabels[j].Name {
+			b.Add(externalLabels[j].Name, externalLabels[j].Value)
 			j++
 		}
+		if j < len(externalLabels) && l.Name == externalLabels[j].Name {
+			j++
+		}
+		b.Add(l.Name, l.Value)
+	})
+	for ; j < len(externalLabels); j++ {
+		b.Add(externalLabels[j].Name, externalLabels[j].Value)
 	}
 
-	return append(append(result, ls[i:]...), externalLabels[j:]...)
+	return b.Labels()
 }
 
 func (t *QueueManager) updateShardsLoop() {
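Because both the series labels and the external labels are sorted by name, the rewritten processExternalLabels can merge them in a single pass, indexing into the []labels.Label slice while walking the Labels with Range. A standalone restatement of the documented semantics ("if ls contains a label in externalLabels, the value in ls wins"), not the commit's code:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// mergeExternal restates the documented behaviour of processExternalLabels:
// external labels are merged into ls, and on a name clash the value in ls wins.
// Illustrative sketch only; external must be sorted by name.
func mergeExternal(ls labels.Labels, external []labels.Label) labels.Labels {
	b := labels.NewScratchBuilder(ls.Len() + len(external))
	j := 0
	ls.Range(func(l labels.Label) {
		// Emit any external labels that sort before the current series label.
		for j < len(external) && external[j].Name < l.Name {
			b.Add(external[j].Name, external[j].Value)
			j++
		}
		// On a clash, skip the external value: the series label wins.
		if j < len(external) && external[j].Name == l.Name {
			j++
		}
		b.Add(l.Name, l.Value)
	})
	// Append any external labels that sort after all series labels.
	for ; j < len(external); j++ {
		b.Add(external[j].Name, external[j].Value)
	}
	return b.Labels()
}

func main() {
	series := labels.FromStrings("__name__", "up", "cluster", "local")
	external := []labels.Label{{Name: "cluster", Value: "prod"}, {Name: "region", Value: "eu"}}
	// cluster="local" from the series is kept; region="eu" is added.
	fmt.Println(mergeExternal(series, external))
}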
@@ -180,9 +180,11 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers .
 // We return the new set of matchers, along with a map of labels for which
 // matchers were added, so that these can later be removed from the result
 // time series again.
-func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) {
-	el := make(labels.Labels, len(q.externalLabels))
-	copy(el, q.externalLabels)
+func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []string) {
+	el := make([]labels.Label, 0, q.externalLabels.Len())
+	q.externalLabels.Range(func(l labels.Label) {
+		el = append(el, l)
+	})
 
 	// ms won't be sorted, so have to O(n^2) the search.
 	for _, m := range ms {
@@ -202,7 +204,11 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, lab
 		}
 		ms = append(ms, m)
 	}
-	return ms, el
+	names := make([]string, len(el))
+	for i := range el {
+		names[i] = el[i].Name
+	}
+	return ms, names
 }
 
 // LabelValues implements storage.Querier and is a noop.
@@ -234,7 +240,8 @@ func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, match
 	return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...))
 }
 
-func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.SeriesSet {
+// Note strings in toFilter must be sorted.
+func newSeriesSetFilter(ss storage.SeriesSet, toFilter []string) storage.SeriesSet {
 	return &seriesSetFilter{
 		SeriesSet: ss,
 		toFilter:  toFilter,
@@ -243,7 +250,7 @@ func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.Se
 
 type seriesSetFilter struct {
 	storage.SeriesSet
-	toFilter labels.Labels
+	toFilter []string // Label names to remove from result
 	querier  storage.Querier
 }
 
@@ -264,20 +271,12 @@ func (ssf seriesSetFilter) At() storage.Series {
 
 type seriesFilter struct {
 	storage.Series
-	toFilter labels.Labels
+	toFilter []string // Label names to remove from result
 }
 
 func (sf seriesFilter) Labels() labels.Labels {
-	labels := sf.Series.Labels()
-	for i, j := 0, 0; i < len(labels) && j < len(sf.toFilter); {
-		if labels[i].Name < sf.toFilter[j].Name {
-			i++
-		} else if labels[i].Name > sf.toFilter[j].Name {
-			j++
-		} else {
-			labels = labels[:i+copy(labels[i:], labels[i+1:])]
-			j++
-		}
-	}
-	return labels
+	b := labels.NewBuilder(sf.Series.Labels())
+	// todo: check if this is too inefficient.
+	b.Del(sf.toFilter...)
+	return b.Labels(labels.EmptyLabels())
 }
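The rewritten seriesFilter.Labels() drops the unwanted names with labels.Builder.Del, which is quadratic in the number of names ("albeit N^2" in the commit message) but keeps the code independent of the Labels representation. A small sketch of the same Del pattern, using the Builder.Labels(labels.EmptyLabels()) form that this version of the labels package expects; the label names are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	series := labels.FromStrings("__name__", "up", "cluster", "prod", "replica", "a")

	// Names that were injected as external-label matchers and should be
	// stripped from the result again.
	toFilter := []string{"cluster", "replica"}

	b := labels.NewBuilder(series)
	b.Del(toFilter...)
	fmt.Println(b.Labels(labels.EmptyLabels())) // {__name__="up"}
}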