Fix lint issues

Signed-off-by: beorn7 <beorn@grafana.com>
This commit is contained in:
beorn7 2021-07-05 15:27:46 +02:00
parent 9cd0297fda
commit deb02d59fb
5 changed files with 22 additions and 20 deletions

View file

@@ -36,6 +36,7 @@ func (e Encoding) String() string {
return "<unknown>"
}
// IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool {
switch e {
case EncXOR, EncSHS:

View file

@@ -105,6 +105,7 @@ func (c *HistoChunk) Meta() (int32, []histogram.Span, []histogram.Span, error) {
return readHistoChunkMeta(&b)
}
// Compact implements the Chunk interface.
func (c *HistoChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)
@@ -192,6 +193,7 @@ func (c *HistoChunk) Iterator(it Iterator) Iterator {
return c.iterator(it)
}
// HistoAppender is an Appender implementation for sparse histograms.
type HistoAppender struct {
b *bstream
@@ -199,10 +201,9 @@ type HistoAppender struct {
schema int32
posSpans, negSpans []histogram.Span
// For the fields that are tracked as dod's.
// Note that we expect to handle negative deltas (e.g. resets) by
// creating new chunks, we still want to support it in general hence
// signed integer types.
// For the fields that are tracked as dod's. Note that we expect to
// handle negative deltas (e.g. resets) by creating new chunks, we still
// want to support it in general hence signed integer types.
t int64
cnt, zcnt uint64
tDelta, cntDelta, zcntDelta int64
@@ -230,6 +231,10 @@ func putUvarint(b *bstream, buf []byte, x uint64) {
}
}
// Append implements Appender. This implementation does nothing for now.
// TODO(beorn7): Implement in a meaningful way, i.e. we need to support
// appending of stale markers, but this should never be used for "real"
// samples.
func (a *HistoAppender) Append(int64, float64) {}
// Appendable returns whether the chunk can be appended to, and if so
@@ -239,7 +244,7 @@ func (a *HistoAppender) Append(int64, float64) {}
// * the schema has changed
// * the zerobucket threshold has changed
// * any buckets disappeared
func (a *HistoAppender) Appendable(h histogram.SparseHistogram) ([]interjection, []interjection, bool) {
func (a *HistoAppender) Appendable(h histogram.SparseHistogram) ([]Interjection, []Interjection, bool) {
// TODO zerothreshold
if h.Schema != a.schema {
return nil, nil, false
@@ -359,7 +364,7 @@ func (a *HistoAppender) AppendHistogram(t int64, h histogram.SparseHistogram) {
// (positive and/or negative) buckets used, according to the provided interjections, resulting in
// the honoring of the provided new posSpans and negSpans
// note: the decode-recode can probably be done more efficiently, but that's for a future optimization
func (a *HistoAppender) Recode(posInterjections, negInterjections []interjection, posSpans, negSpans []histogram.Span) (Chunk, Appender) {
func (a *HistoAppender) Recode(posInterjections, negInterjections []Interjection, posSpans, negSpans []histogram.Span) (Chunk, Appender) {
it := newHistoIterator(a.b.bytes())
hc := NewHistoChunk()
app, err := hc.Appender()

View file

@@ -11,11 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// read-only byte slices.
package chunkenc
import "github.com/prometheus/prometheus/pkg/histogram"
@@ -125,8 +120,8 @@ try:
return 0, false
}
// interjection describes that num new buckets are introduced before processing the pos'th delta from the original slice
type interjection struct {
// Interjection describes that num new buckets are introduced before processing the pos'th delta from the original slice
type Interjection struct {
pos int
num int
}
@@ -153,14 +148,14 @@ type interjection struct {
// need to generate a list of interjections
// note: within compareSpans we don't have to worry about the changes to the spans themselves,
// thanks to the iterators, we get to work with the more useful bucket indices (which of course directly correspond to the buckets we have to adjust)
func compareSpans(a, b []histogram.Span) ([]interjection, bool) {
func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
ai := newBucketIterator(a)
bi := newBucketIterator(b)
var interjections []interjection
var interjections []Interjection
// when inter.num becomes > 0, this becomes a valid interjection that should be yielded when we finish a streak of new buckets
var inter interjection
var inter Interjection
av, aok := ai.Next()
bv, bok := bi.Next()
@@ -205,7 +200,7 @@ func compareSpans(a, b []histogram.Span) ([]interjection, bool) {
}
// caller is responsible for making sure len(in) and len(out) are appropriate for the provided interjections!
func interject(in, out []int64, interjections []interjection) []int64 {
func interject(in, out []int64, interjections []Interjection) []int64 {
var j int // position in out
var v int64 // the last value seen
var interj int // the next interjection to process

View file

@@ -122,7 +122,7 @@ func TestInterjection(t *testing.T) {
{Offset: 1, Length: 4},
{Offset: 3, Length: 3},
}
interj := []interjection{
interj := []Interjection{
{
pos: 2,
num: 1,
@@ -140,13 +140,13 @@ func TestInterjection(t *testing.T) {
testInterject(interj, t)
}
func testCompareSpans(a, b []histogram.Span, exp []interjection, t *testing.T) {
func testCompareSpans(a, b []histogram.Span, exp []Interjection, t *testing.T) {
got, ok := compareSpans(a, b)
require.Equal(t, true, ok)
require.Equal(t, exp, got)
}
func testInterject(interjections []interjection, t *testing.T) {
func testInterject(interjections []Interjection, t *testing.T) {
// this tests the scenario as described in compareSpans's comments
// original deltas that represent these counts : 6, 3, 3, 2, 4, 5, 1
a := []int64{6, -3, 0, -1, 2, 1, -4}

View file

@@ -81,6 +81,7 @@ func (c *XORChunk) NumSamples() int {
return int(binary.BigEndian.Uint16(c.Bytes()))
}
// Compact implements the Chunk interface.
func (c *XORChunk) Compact() {
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
buf := make([]byte, l)