Added 'limit' query parameter support to /api/v1/status/tsdb endpoint (#12336)

* Added 'topN' query parameter support to /api/v1/status/tsdb endpoint

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Updated query parameter for tsdb status to 'limit'

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Corrected Stats() parameter name from topN to limit

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

* Fixed p.Stats CI failure

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>

---------

Signed-off-by: Baskar Shanmugam <baskar.shanmugam.career@gmail.com>
Baskar Shanmugam 2023-05-22 18:07:07 +05:30 committed by GitHub
parent 8c5d4b4add
commit 905a0bd63a
9 changed files with 45 additions and 25 deletions


@@ -1486,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 }
 
 // Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
     if x := s.get(); x != nil {
         switch db := x.(type) {
         case *tsdb.DB:
-            return db.Head().Stats(statsByLabelName), nil
+            return db.Head().Stats(statsByLabelName, limit), nil
         case *agent.DB:
             return nil, agent.ErrUnsupported
         default:


@@ -1074,6 +1074,10 @@ The following endpoint returns various cardinality statistics about the Prometheus TSDB:
 ```
 GET /api/v1/status/tsdb
 ```
+URL query parameters:
+
+- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
+
 The `data` section of the query result consists of
 - **headStats**: This provides the following data about the head block of the TSDB:
   - **numSeries**: The number of series.
   - **chunkCount**: The number of chunks.
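
For illustration only (not part of this commit): a minimal Go client that exercises the new `limit` parameter. It assumes a Prometheus server reachable at localhost:9090; the endpoint and parameter name come from the change above.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask for only the top 5 entries per statistic instead of the default 10.
	resp, err := http.Get("http://localhost:9090/api/v1/status/tsdb?limit=5")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Print the raw JSON response, including headStats and the
	// limited per-statistic lists.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```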


@@ -978,7 +978,7 @@ func (h *Head) DisableNativeHistograms() {
 }
 
 // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
-func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
+func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
     h.cardinalityMutex.Lock()
     defer h.cardinalityMutex.Unlock()
     currentTime := time.Duration(time.Now().Unix()) * time.Second
@@ -989,7 +989,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
     if h.cardinalityCache != nil {
         return h.cardinalityCache
     }
-    h.cardinalityCache = h.postings.Stats(statsByLabelName)
+    h.cardinalityCache = h.postings.Stats(statsByLabelName, limit)
     h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
 
     return h.cardinalityCache
@@ -1329,12 +1329,12 @@ type Stats struct {
 // Stats returns important current HEAD statistics. Note that it is expensive to
 // calculate these.
-func (h *Head) Stats(statsByLabelName string) *Stats {
+func (h *Head) Stats(statsByLabelName string, limit int) *Stats {
     return &Stats{
         NumSeries:         h.NumSeries(),
         MaxTime:           h.MaxTime(),
         MinTime:           h.MinTime(),
-        IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName),
+        IndexPostingStats: h.PostingsCardinalityStats(statsByLabelName, limit),
     }
 }


@@ -156,10 +156,8 @@ type PostingsStats struct {
 }
 
 // Stats calculates the cardinality statistics from postings.
-func (p *MemPostings) Stats(label string) *PostingsStats {
-    const maxNumOfRecords = 10
+func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
     var size uint64
 
     p.mtx.RLock()
 
     metrics := &maxHeap{}
@@ -168,10 +166,10 @@ func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
     labelValuePairs := &maxHeap{}
     numLabelPairs := 0
 
-    metrics.init(maxNumOfRecords)
-    labels.init(maxNumOfRecords)
-    labelValueLength.init(maxNumOfRecords)
-    labelValuePairs.init(maxNumOfRecords)
+    metrics.init(limit)
+    labels.init(limit)
+    labelValueLength.init(limit)
+    labelValuePairs.init(limit)
 
     for n, e := range p.m {
         if n == "" {


@@ -912,7 +912,7 @@ func BenchmarkPostings_Stats(b *testing.B) {
 }
 b.ResetTimer()
 
 for n := 0; n < b.N; n++ {
-    p.Stats("__name__")
+    p.Stats("__name__", 10)
 }
 }
@@ -927,7 +927,7 @@ func TestMemPostingsStats(t *testing.T) {
 p.Add(2, labels.FromStrings("label", "value1"))
 
 // call the Stats method to calculate the cardinality statistics
-stats := p.Stats("label")
+stats := p.Stats("label", 10)
 
 // assert that the expected statistics were calculated
 require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)


@@ -171,7 +171,7 @@ type TSDBAdminStats interface {
     CleanTombstones() error
     Delete(mint, maxt int64, ms ...*labels.Matcher) error
     Snapshot(dir string, withHead bool) error
-    Stats(statsByLabelName string) (*tsdb.Stats, error)
+    Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
     WALReplayStatus() (tsdb.WALReplayStatus, error)
 }
@@ -1472,8 +1472,15 @@ func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
     return result
 }
 
-func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
-    s, err := api.db.Stats(labels.MetricName)
+func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
+    limit := 10
+    if s := r.FormValue("limit"); s != "" {
+        var err error
+        if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
+            return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
+        }
+    }
+    s, err := api.db.Stats(labels.MetricName, limit)
     if err != nil {
         return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
     }


@@ -2622,7 +2622,7 @@ type fakeDB struct {
 func (f *fakeDB) CleanTombstones() error { return f.err }
 func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err }
 func (f *fakeDB) Snapshot(string, bool) error { return f.err }
-func (f *fakeDB) Stats(statsByLabelName string) (_ *tsdb.Stats, retErr error) {
+func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
     dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
     if err != nil {
         return nil, err
@@ -2636,7 +2636,7 @@ func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
     opts := tsdb.DefaultHeadOptions()
     opts.ChunkRange = 1000
     h, _ := tsdb.NewHead(nil, nil, nil, nil, opts, nil)
-    return h.Stats(statsByLabelName), nil
+    return h.Stats(statsByLabelName, limit), nil
 }
 
 func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) {
@@ -3283,9 +3283,20 @@ func TestTSDBStatus(t *testing.T) {
         {
             db:       tsdb,
             endpoint: tsdbStatusAPI,
             errType:  errorNone,
         },
+        {
+            db:       tsdb,
+            endpoint: tsdbStatusAPI,
+            values:   map[string][]string{"limit": {"20"}},
+            errType:  errorNone,
+        },
+        {
+            db:       tsdb,
+            endpoint: tsdbStatusAPI,
+            values:   map[string][]string{"limit": {"0"}},
+            errType:  errorBadData,
+        },
     } {
         tc := tc
         t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {


@@ -251,7 +251,7 @@ func (notReadyReadStorage) StartTime() (int64, error) {
     return 0, errors.Wrap(tsdb.ErrNotReady, "wrap")
 }
 
-func (notReadyReadStorage) Stats(string) (*tsdb.Stats, error) {
+func (notReadyReadStorage) Stats(string, int) (*tsdb.Stats, error) {
     return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
 }


@@ -52,8 +52,8 @@ type dbAdapter struct {
     *tsdb.DB
 }
 
-func (a *dbAdapter) Stats(statsByLabelName string) (*tsdb.Stats, error) {
-    return a.Head().Stats(statsByLabelName), nil
+func (a *dbAdapter) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
+    return a.Head().Stats(statsByLabelName, limit), nil
 }
 
 func (a *dbAdapter) WALReplayStatus() (tsdb.WALReplayStatus, error) {