diff --git a/promql/value.go b/promql/value.go
index cd36c9a7e..861dba6ae 100644
--- a/promql/value.go
+++ b/promql/value.go
@@ -146,6 +146,9 @@ func (p Point) MarshalJSON() ([]byte, error) {
 	it := p.H.AllBucketIterator()
 	for it.Next() {
 		bucket := it.At()
+		if bucket.Count == 0 {
+			continue // No need to expose empty buckets in JSON.
+		}
 		boundaries := 2 // Exclusive on both sides AKA open interval.
 		if bucket.LowerInclusive {
 			if bucket.UpperInclusive {
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 92dc254c3..ca63927a4 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -513,188 +513,6 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
 	}, nil, res.Warnings, qry.Close}
 }
 
-// TODO: remove this when we have sparse histogram support in PromQL.
-// This is a hack to query sparse histogram for buckets.
-//func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
-//	start, err := parseTime(r.FormValue("start"))
-//	if err != nil {
-//		return invalidParamError(err, "start")
-//	}
-//	end, err := parseTime(r.FormValue("end"))
-//	if err != nil {
-//		return invalidParamError(err, "end")
-//	}
-//	if end.Before(start) {
-//		return invalidParamError(errors.New("end timestamp must not be before start time"), "end")
-//	}
-//
-//	step, err := parseDuration(r.FormValue("step"))
-//	if err != nil {
-//		return invalidParamError(err, "step")
-//	}
-//
-//	if step <= 0 {
-//		return invalidParamError(errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer"), "step")
-//	}
-//
-//	// For safety, limit the number of returned points per timeseries.
-//	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
-//	if end.Sub(start)/step > 11000 {
-//		err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
-//		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
-//	}
-//
-//	ctx := r.Context()
-//	if to := r.FormValue("timeout"); to != "" {
-//		var cancel context.CancelFunc
-//		timeout, err := parseDuration(to)
-//		if err != nil {
-//			return invalidParamError(err, "timeout")
-//		}
-//
-//		ctx, cancel = context.WithTimeout(ctx, timeout)
-//		defer cancel()
-//	}
-//
-//	expr, err := parser.ParseExpr(r.FormValue("query"))
-//	if err != nil {
-//		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
-//	}
-//
-//	selectors := parser.ExtractSelectors(expr)
-//	if len(selectors) < 1 {
-//		return apiFuncResult{nil, nil, nil, nil}
-//	}
-//
-//	if len(selectors) > 1 {
-//		return apiFuncResult{nil, &apiError{errorBadData, errors.New("need exactly 1 selector")}, nil, nil}
-//	}
-//
-//	hasRate, rateDuration := false, time.Duration(0)
-//	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
-//		switch n := node.(type) {
-//		case *parser.Call:
-//			if n.Func.Name == "rate" {
-//				hasRate = true
-//				rateDuration = n.Args[0].(*parser.MatrixSelector).Range
-//				return errors.New("stop it here")
-//			}
-//		}
-//		return nil
-//	})
-//	var numRateSamples int
-//	if hasRate {
-//		numRateSamples = int(end.Sub(start)/step + 1)
-//		if start.Add(time.Duration(numRateSamples-1) * step).After(end) {
-//			numRateSamples--
-//		}
-//		start = start.Add(-rateDuration) // Adjusting for the first point lookback.
-//	}
-//
-//	q, err := api.Queryable.Querier(ctx, timestamp.FromTime(start), timestamp.FromTime(end))
-//	if err != nil {
-//		return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
-//	}
-//
-//	res := promql.Matrix{}
-//	ss := q.Select(true, nil, selectors[0]...)
-//
-//	for ss.Next() {
-//		resSeries := make(map[float64]promql.Series) // le -> series.
-//
-//		s := ss.At()
-//		it := s.Iterator()
-//		for it.Next() { // Per histogram.
-//			t, h := it.AtHistogram()
-//			buckets := histogram.CumulativeExpandSparseHistogram(h)
-//			for buckets.Next() {
-//				// Every bucket is a different series with different "le".
-//				b := buckets.At()
-//				rs, ok := resSeries[b.Le]
-//				if !ok {
-//					rs = promql.Series{
-//						Metric: append(
-//							s.Labels(),
-//							labels.Label{Name: "le", Value: fmt.Sprintf("%.16f", b.Le)}, // TODO: Set some precision for 'le'?
-//						),
-//					}
-//					sort.Sort(rs.Metric)
-//					resSeries[b.Le] = rs
-//				}
-//
-//				rs.Points = append(rs.Points, promql.Point{
-//					T: t,
-//					V: float64(b.Count),
-//				})
-//				resSeries[b.Le] = rs
-//			}
-//			if buckets.Err() != nil {
-//				return apiFuncResult{nil, &apiError{errorExec, buckets.Err()}, nil, nil}
-//			}
-//		}
-//
-//		for _, rs := range resSeries {
-//			res = append(res, rs)
-//		}
-//	}
-//
-//	if hasRate {
-//		newRes := make(promql.Matrix, len(res))
-//		for i := range newRes {
-//			newRes[i].Metric = res[i].Metric
-//			points := make([]promql.Point, numRateSamples)
-//
-//			rawPoints := res[i].Points
-//
-//			startIdx, endIdx := 0, 0
-//			for idx := range points {
-//				pointTime := start.Add(time.Duration(idx) * step)
-//				lookbackTime := pointTime.Add(-rateDuration)
-//				points[idx].T = timestamp.FromTime(pointTime)
-//				if len(rawPoints) == 0 {
-//					continue
-//				}
-//
-//				for startIdx < len(rawPoints) && timestamp.Time(rawPoints[startIdx].T).Before(lookbackTime) {
-//					startIdx++
-//				}
-//				if startIdx >= len(rawPoints) {
-//					startIdx = len(rawPoints) - 1
-//				}
-//
-//				for endIdx < len(rawPoints) && timestamp.Time(rawPoints[endIdx].T).Before(pointTime) {
-//					endIdx++
-//				}
-//				if endIdx >= len(rawPoints) {
-//					endIdx = len(rawPoints) - 1
-//				} else if timestamp.Time(rawPoints[endIdx].T).After(pointTime) && (len(rawPoints) == 1 || endIdx == 0) {
-//					continue
-//				} else {
-//					endIdx--
-//				}
-//
-//				valDiff := rawPoints[endIdx].V - rawPoints[startIdx].V
-//				timeDiffSeconds := float64(timestamp.Time(rawPoints[endIdx].T).Sub(timestamp.Time(rawPoints[startIdx].T))) / float64(time.Second)
-//
-//				if timeDiffSeconds != 0 {
-//					points[idx].V = valDiff / timeDiffSeconds
-//				}
-//			}
-//
-//			newRes[i].Points = points
-//		}
-//
-//		res = newRes
-//	}
-//
-//	sort.Sort(res)
-//
-//	return apiFuncResult{&queryData{
-//		ResultType: res.Type(),
-//		Result: res,
-//	}, nil, nil, nil}
-//}
-
 func (api *API) queryExemplars(r *http.Request) apiFuncResult {
 	start, err := parseTimeParam(r, "start", minTime)
 	if err != nil {
@@ -1984,13 +1802,16 @@ func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) {
 	bucketFound := false
 	it := h.AllBucketIterator()
 	for it.Next() {
+		bucket := it.At()
+		if bucket.Count == 0 {
+			continue // No need to expose empty buckets in JSON.
+		}
 		stream.WriteMore()
 		if !bucketFound {
 			stream.WriteObjectField(`buckets`)
 			stream.WriteArrayStart()
 		}
 		bucketFound = true
-		bucket := it.At()
 		boundaries := 2 // Exclusive on both sides AKA open interval.
 		if bucket.LowerInclusive {
 			if bucket.UpperInclusive {
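
For illustration only, here is a minimal, self-contained Go sketch of the bucket-skipping idea introduced in both hunks above. The Bucket type, marshalBuckets function, and JSON shape below are hypothetical stand-ins (only the fields used in the diff are modeled, and the boundaries encodings other than the open-interval case visible in the context lines are assumed); this is not the actual model/histogram or jsoniter code.

package main

import (
	"encoding/json"
	"fmt"
)

// Bucket is a hypothetical stand-in for the histogram bucket type used above.
type Bucket struct {
	Count, Lower, Upper            float64
	LowerInclusive, UpperInclusive bool
}

// marshalBuckets renders each non-empty bucket as [boundaries, lower, upper, count].
// Buckets with a zero count are skipped, mirroring the `continue` added in the diff.
func marshalBuckets(buckets []Bucket) ([]byte, error) {
	out := make([][4]interface{}, 0, len(buckets))
	for _, b := range buckets {
		if b.Count == 0 {
			continue // No need to expose empty buckets in JSON.
		}
		boundaries := 2 // Exclusive on both sides AKA open interval.
		if b.LowerInclusive {
			if b.UpperInclusive {
				boundaries = 3 // Inclusive on both sides (assumed encoding).
			} else {
				boundaries = 1 // Inclusive only on the lower end (assumed encoding).
			}
		} else if b.UpperInclusive {
			boundaries = 0 // Inclusive only on the upper end (assumed encoding).
		}
		out = append(out, [4]interface{}{boundaries, fmt.Sprint(b.Lower), fmt.Sprint(b.Upper), fmt.Sprint(b.Count)})
	}
	return json.Marshal(out)
}

func main() {
	buckets := []Bucket{
		{Count: 0, Lower: 0.5, Upper: 1},                     // skipped: empty bucket
		{Count: 3, Lower: 1, Upper: 2, UpperInclusive: true}, // rendered with boundaries 0
	}
	b, err := marshalBuckets(buckets)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [[0,"1","2","3"]]
}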