Added 'limit' query parameter support to /api/v1/status/tsdb endpoint (#12336)

* Added 'topN' query parameter support to /api/v1/status/tsdb endpoint

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Updated query parameter for tsdb status to 'limit'

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Corrected Stats() parameter name from topN to limit

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Fixed p.Stats CI failure

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

---------

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>
Baskar Shanmugam authored on 2023-05-22 18:07:07 +05:30, committed by GitHub
parent 8c5d4b4add
commit 905a0bd63a
9 changed files with 45 additions and 25 deletions


@@ -1486,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
}
// Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
if x := s.get(); x != nil {
switch db := x.(type) {
case *tsdb.DB:
-return db.Head().Stats(statsByLabelName), nil
+return db.Head().Stats(statsByLabelName, limit), nil
case *agent.DB:
return nil, agent.ErrUnsupported
default:


@@ -1074,6 +1074,10 @@ The following endpoint returns various cardinality statistics about the Prometheus TSDB:
```
GET /api/v1/status/tsdb
```
+URL query parameters:
+- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
The `data` section of the query result consists of
- **headStats**: This provides the following data about the head block of the TSDB:
- **numSeries**: The number of series.
- **chunkCount**: The number of chunks.
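
With this in the docs, a request can cap each statistic list explicitly. For example, the following request (illustrative only, not part of the diff; the value 5 is arbitrary) asks for the five highest-cardinality entries per statistic, in the same endpoint-spec style the docs already use:

```
GET /api/v1/status/tsdb?limit=5
```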


@@ -978,7 +978,7 @@ func (h *Head) DisableNativeHistograms() {
}
// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
-func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
+func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
h.cardinalityMutex.Lock()
defer h.cardinalityMutex.Unlock()
currentTime := time.Duration(time.Now().Unix()) * time.Second
@@ -989,7 +989,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
if h.cardinalityCache != nil {
return h.cardinalityCache
}
-h.cardinalityCache = h.postings.Stats(statsByLabelName)
+h.cardinalityCache = h.postings.Stats(statsByLabelName, limit)
h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
return h.cardinalityCache
@@ -1329,12 +1329,12 @@ type Stats struct {
// Stats returns important current HEAD statistics. Note that it is expensive to
// calculate these.
-func (h *Head) Stats(statsByLabelName string) *Stats {
+func (h *Head) Stats(statsByLabelName string, limit int) *Stats {
return &Stats{
NumSeries: h.NumSeries(),
MaxTime: h.MaxTime(),
MinTime: h.MinTime(),
-IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName),
+IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName, limit),
}
}


@@ -156,10 +156,8 @@ type PostingsStats struct {
}
// Stats calculates the cardinality statistics from postings.
-func (p *MemPostings) Stats(label string) *PostingsStats {
-const maxNumOfRecords = 10
+func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
var size uint64
p.mtx.RLock()
metrics := &maxHeap{}
@@ -168,10 +166,10 @@ func (p *MemPostings) Stats(label string) *PostingsStats {
labelValuePairs := &maxHeap{}
numLabelPairs := 0
-metrics.init(maxNumOfRecords)
-labels.init(maxNumOfRecords)
-labelValueLength.init(maxNumOfRecords)
-labelValuePairs.init(maxNumOfRecords)
+metrics.init(limit)
+labels.init(limit)
+labelValueLength.init(limit)
+labelValuePairs.init(limit)
for n, e := range p.m {
if n == "" {


@@ -912,7 +912,7 @@ func BenchmarkPostings_Stats(b *testing.B) {
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
p.Stats("__name__")
p.Stats("__name__", 10)
}
}
@@ -927,7 +927,7 @@ func TestMemPostingsStats(t *testing.T) {
p.Add(2, labels.FromStrings("label", "value1"))
// call the Stats method to calculate the cardinality statistics
stats := p.Stats("label")
stats := p.Stats("label", 10)
// assert that the expected statistics were calculated
require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)


@@ -171,7 +171,7 @@ type TSDBAdminStats interface {
CleanTombstones() error
Delete(mint, maxt int64, ms ...*labels.Matcher) error
Snapshot(dir string, withHead bool) error
-Stats(statsByLabelName string) (*tsdb.Stats, error)
+Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
WALReplayStatus() (tsdb.WALReplayStatus, error)
}
@@ -1472,8 +1472,15 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
return result
}
-func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
-s, err := api.db.Stats(labels.MetricName)
+func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
+limit := 10
+if s := r.FormValue("limit"); s != "" {
+var err error
+if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
+return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
+}
+}
+s, err := api.db.Stats(labels.MetricName, limit)
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
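
As a quick way to exercise the new parameter end to end, here is a minimal client-side sketch in Go. The server address (localhost:9090) and the generic map decoding are assumptions for illustration; the default of 10 and the validation error come from serveTSDBStatus above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Ask for at most 5 entries per cardinality statistic; omitting the
	// parameter falls back to the handler's default of 10.
	resp, err := http.Get("http://localhost:9090/api/v1/status/tsdb?limit=5")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode generically rather than assuming the exact response schema.
	var body map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		log.Fatal(err)
	}
	fmt.Println(body["status"], body["data"])

	// A non-positive value (e.g. limit=0) is rejected by the handler with
	// errorBadData and the message "limit must be a positive number".
}
```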


@@ -2622,7 +2622,7 @@ type fakeDB struct {
func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(string, bool) error { return f.err }
-func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
+func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
if err != nil {
return nil, err
@@ -2636,7 +2636,7 @@ func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
opts := tsdb.DefaultHeadOptions()
opts.ChunkRange = 1000
h, _ := tsdb.NewHead(nil, nil, nil, nil, opts, nil)
-return h.Stats(statsByLabelName), nil
+return h.Stats(statsByLabelName, limit), nil
}
func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) {
@@ -3283,9 +3283,20 @@ func TestTSDBStatus(t *testing.T) {
{
db: tsdb,
endpoint: tsdbStatusAPI,
errType: errorNone,
},
+{
+db: tsdb,
+endpoint: tsdbStatusAPI,
+values: map[string][]string{"limit": {"20"}},
+errType: errorNone,
+},
+{
+db: tsdb,
+endpoint: tsdbStatusAPI,
+values: map[string][]string{"limit": {"0"}},
+errType: errorBadData,
+},
} {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {


@@ -251,7 +251,7 @@ func (notReadyReadStorage) StartTime() (int64, error) {
return 0, errors.Wrap(tsdb.ErrNotReady, "wrap")
}
-func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) {
+func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) {
return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
}


@@ -52,8 +52,8 @@ type dbAdapter struct {
*tsdb.DB
}
-func (a *dbAdapter) Stats(statsByLabelName string) (*tsdb.Stats, error) {
-return a.Head().Stats(statsByLabelName), nil
+func (a *dbAdapter) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
+return a.Head().Stats(statsByLabelName, limit), nil
}
func (a *dbAdapter) WALReplayStatus() (tsdb.WALReplayStatus, error) {