mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-27 22:49:40 -08:00)

commit deb02d59fb (parent 9cd0297fda)

Fix lint issues

Signed-off-by: beorn7 <beorn@grafana.com>
@@ -36,6 +36,7 @@ func (e Encoding) String() string {
 	return "<unknown>"
 }
 
+// IsValidEncoding returns true for supported encodings.
 func IsValidEncoding(e Encoding) bool {
 	switch e {
 	case EncXOR, EncSHS:
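
For context, the comment added in this hunk satisfies the lint rule that every exported identifier carries a doc comment beginning with its own name. Below is a minimal usage sketch of IsValidEncoding, assuming only the signature and the "<unknown>" String() fallback visible in this hunk; the tsdb/chunkenc import path is taken from the surrounding package, everything else is illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// checkEncoding rejects unknown chunk encodings before any decoding is attempted.
func checkEncoding(b byte) error {
	enc := chunkenc.Encoding(b)
	if !chunkenc.IsValidEncoding(enc) {
		return fmt.Errorf("invalid chunk encoding %q", enc)
	}
	return nil
}

func main() {
	fmt.Println(checkEncoding(byte(chunkenc.EncXOR))) // <nil>
	fmt.Println(checkEncoding(0xff))                  // invalid chunk encoding "<unknown>"
}
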
@@ -105,6 +105,7 @@ func (c *HistoChunk) Meta() (int32, []histogram.Span, []histogram.Span, error) {
 	return readHistoChunkMeta(&b)
 }
 
+// Compact implements the Chunk interface.
 func (c *HistoChunk) Compact() {
 	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
 		buf := make([]byte, l)
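
The Compact body shown here follows a common Go idiom: when a slice has accumulated far more capacity than it uses, copy it into a right-sized buffer so the excess can be garbage-collected. A self-contained sketch of that idiom; the constant and helper names are mine, and the value 32 is an assumption, not necessarily the package's chunkCompactCapacityThreshold.

package main

import "fmt"

// compactCapacityThreshold plays the role of chunkCompactCapacityThreshold in this sketch.
const compactCapacityThreshold = 32

// compact reallocates the slice when its spare capacity exceeds the threshold.
func compact(stream []byte) []byte {
	if l := len(stream); cap(stream) > l+compactCapacityThreshold {
		buf := make([]byte, l)
		copy(buf, stream)
		return buf
	}
	return stream
}

func main() {
	b := make([]byte, 8, 1024) // 8 bytes in use, plenty of unused capacity
	b = compact(b)
	fmt.Println(len(b), cap(b)) // 8 8
}
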
@@ -192,6 +193,7 @@ func (c *HistoChunk) Iterator(it Iterator) Iterator {
 	return c.iterator(it)
 }
 
+// HistoAppender is an Appender implementation for sparse histograms.
 type HistoAppender struct {
 	b *bstream
 
@@ -199,10 +201,9 @@ type HistoAppender struct {
 	schema             int32
 	posSpans, negSpans []histogram.Span
 
-	// For the fields that are tracked as dod's.
-	// Note that we expect to handle negative deltas (e.g. resets) by
-	// creating new chunks, we still want to support it in general hence
-	// signed integer types.
+	// For the fields that are tracked as dod's. Note that we expect to
+	// handle negative deltas (e.g. resets) by creating new chunks, we still
+	// want to support it in general hence signed integer types.
 	t                           int64
 	cnt, zcnt                   uint64
 	tDelta, cntDelta, zcntDelta int64
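
The reworded comment is about the delta-of-delta ("dod") fields: deltas stay in signed types so that a decrease (for example a counter reset) still encodes, even though a new chunk is normally cut in that case. A small illustrative sketch of the bookkeeping; the tracker type and its handling of the first samples are simplifications of mine, not the appender's actual logic.

package main

import "fmt"

// dodTracker keeps the previous value and delta so each new sample reduces to
// a delta-of-delta, the quantity that ultimately gets bit-packed.
type dodTracker struct {
	prev  int64
	delta int64
}

// push returns the delta-of-delta for v; note that it can be negative.
func (d *dodTracker) push(v int64) int64 {
	newDelta := v - d.prev
	dod := newDelta - d.delta
	d.prev, d.delta = v, newDelta
	return dod
}

func main() {
	var tr dodTracker
	for _, ts := range []int64{1000, 1015, 1030, 1046} {
		fmt.Println(tr.push(ts)) // 1000, -985, 0, 1 (a real appender special-cases the first samples)
	}
}
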
@@ -230,6 +231,10 @@ func putUvarint(b *bstream, buf []byte, x uint64) {
 	}
 }
 
+// Append implements Appender. This implementation does nothing for now.
+// TODO(beorn7): Implement in a meaningful way, i.e. we need to support
+// appending of stale markers, but this should never be used for "real"
+// samples.
 func (a *HistoAppender) Append(int64, float64) {}
 
 // Appendable returns whether the chunk can be appended to, and if so
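
Background for the TODO in the new comment: Prometheus marks staleness with a dedicated NaN bit pattern, which is what a float Append on a histogram chunk would mainly have to carry. A minimal sketch using pkg/value from the same repository; treating its StaleNaN/IsStaleNaN helpers as available on this import path is an assumption based on the pkg/histogram import seen later in this diff.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/value"
)

func main() {
	// StaleNaN is a specific NaN bit pattern; IsStaleNaN tells it apart from
	// ordinary values and ordinary NaNs.
	fmt.Println(value.IsStaleNaN(value.StaleNaN)) // true
	fmt.Println(value.IsStaleNaN(42))             // false
}
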
@@ -239,7 +244,7 @@ func (a *HistoAppender) Append(int64, float64) {}
 // * the schema has changed
 // * the zerobucket threshold has changed
 // * any buckets disappeared
-func (a *HistoAppender) Appendable(h histogram.SparseHistogram) ([]interjection, []interjection, bool) {
+func (a *HistoAppender) Appendable(h histogram.SparseHistogram) ([]Interjection, []Interjection, bool) {
 	// TODO zerothreshold
 	if h.Schema != a.schema {
 		return nil, nil, false
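
The signature change here is the core of the lint fix: golint complains when an exported method returns an unexported type (previously interjection), since callers outside the package cannot even name the result. A minimal illustration of that rule with made-up names; none of this code is from the repository.

package sketch

// result is unexported; exported APIs returning it trip the linter
// ("exported method ... returns unexported type ..., which can be annoying to use").
type result struct{ n int }

// Service is a stand-in receiver type for the example.
type Service struct{}

// Before (lint warning): callers could not declare a variable of the return type.
// func (s *Service) Compute() result { return result{n: 1} }

// Result is the exported replacement, mirroring the interjection -> Interjection rename.
type Result struct{ N int }

// Compute returns the exported Result, which keeps the linter quiet.
func (s *Service) Compute() Result { return Result{N: 1} }
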
@@ -359,7 +364,7 @@ func (a *HistoAppender) AppendHistogram(t int64, h histogram.SparseHistogram) {
 // (positive and/or negative) buckets used, according to the provided interjections, resulting in
 // the honoring of the provided new posSpans and negSpans
 // note: the decode-recode can probably be done more efficiently, but that's for a future optimization
-func (a *HistoAppender) Recode(posInterjections, negInterjections []interjection, posSpans, negSpans []histogram.Span) (Chunk, Appender) {
+func (a *HistoAppender) Recode(posInterjections, negInterjections []Interjection, posSpans, negSpans []histogram.Span) (Chunk, Appender) {
 	it := newHistoIterator(a.b.bytes())
 	hc := NewHistoChunk()
 	app, err := hc.Appender()
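
For context on how Appendable and Recode compose from the caller's side, here is a sketch built only from the signatures visible in these hunks. The histogram.SparseHistogram field names (PositiveSpans, NegativeSpans) and the type assertion on Recode's second return value are assumptions, so read this as an illustration rather than the repository's own call site.

package sketch

import (
	"github.com/prometheus/prometheus/pkg/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// appendOrRecode appends h at timestamp t, recoding the chunk first if new
// buckets appeared. It reports false when the caller has to cut a new chunk.
func appendOrRecode(app *chunkenc.HistoAppender, t int64, h histogram.SparseHistogram) (*chunkenc.HistoAppender, bool) {
	posInt, negInt, ok := app.Appendable(h)
	if !ok {
		// Schema or zero-bucket threshold changed, or buckets disappeared.
		return app, false
	}
	if len(posInt) > 0 || len(negInt) > 0 {
		// Only new buckets appeared: rewrite the chunk so its layout honors
		// the widened spans, then continue with the appender of the new chunk.
		_, newApp := app.Recode(posInt, negInt, h.PositiveSpans, h.NegativeSpans) // field names assumed
		app = newApp.(*chunkenc.HistoAppender)                                    // dynamic type assumed
	}
	app.AppendHistogram(t, h)
	return app, true
}
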
@@ -11,11 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// The code in this file was largely written by Damian Gryski as part of
-// https://github.com/dgryski/go-tsz and published under the license below.
-// It was modified to accommodate reading from byte slices without modifying
-// the underlying bytes, which would panic when reading from mmap'd
-// read-only byte slices.
 package chunkenc
 
 import "github.com/prometheus/prometheus/pkg/histogram"
@@ -125,8 +120,8 @@ try:
 		return 0, false
 	}
 
-// interjection describes that num new buckets are introduced before processing the pos'th delta from the original slice
-type interjection struct {
+// Interjection describes that num new buckets are introduced before processing the pos'th delta from the original slice
+type Interjection struct {
 	pos int
 	num int
 }
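
As a worked illustration of the semantics in the comment above (numbers invented for this sketch, not taken from the test fixture): counts 6, 3, 3, 2 are stored as the deltas 6, -3, 0, -1. An Interjection{pos: 2, num: 1} says that one brand-new, and therefore zero-valued, bucket appears before the original delta at index 2, so the counts become 6, 3, 0, 3, 2 and the stored deltas become 6, -3, -3, 3, -1. A tiny helper to check the arithmetic:

package main

import "fmt"

// countsToDeltas converts absolute bucket counts into the delta form the chunk stores.
func countsToDeltas(counts []int64) []int64 {
	deltas := make([]int64, len(counts))
	var prev int64
	for i, c := range counts {
		deltas[i] = c - prev
		prev = c
	}
	return deltas
}

func main() {
	fmt.Println(countsToDeltas([]int64{6, 3, 3, 2}))    // [6 -3 0 -1]
	fmt.Println(countsToDeltas([]int64{6, 3, 0, 3, 2})) // [6 -3 -3 3 -1]
}
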
@@ -153,14 +148,14 @@ type interjection struct {
 // need to generate a list of interjections
 // note: within compareSpans we don't have to worry about the changes to the spans themselves,
 // thanks to the iterators, we get to work with the more useful bucket indices (which of course directly correspond to the buckets we have to adjust)
-func compareSpans(a, b []histogram.Span) ([]interjection, bool) {
+func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
 	ai := newBucketIterator(a)
 	bi := newBucketIterator(b)
 
-	var interjections []interjection
+	var interjections []Interjection
 
 	// when inter.num becomes > 0, this becomes a valid interjection that should be yielded when we finish a streak of new buckets
-	var inter interjection
+	var inter Interjection
 
 	av, aok := ai.Next()
 	bv, bok := bi.Next()
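
compareSpans's comment says it works on bucket indices rather than on the spans directly. As background, a Span's Offset is the gap to the previous span (or the starting index for the first span) and Length is the number of consecutive buckets; the sketch below, which is not the package's own bucketIterator, expands spans into the absolute indices that compareSpans effectively walks.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/histogram"
)

// bucketIndices lists the absolute bucket indices covered by the given spans.
func bucketIndices(spans []histogram.Span) []int32 {
	var idx []int32
	var cur int32
	for _, s := range spans {
		cur += s.Offset // jump over the gap to this span
		for i := uint32(0); i < s.Length; i++ {
			idx = append(idx, cur)
			cur++
		}
	}
	return idx
}

func main() {
	spans := []histogram.Span{{Offset: 0, Length: 2}, {Offset: 3, Length: 2}}
	fmt.Println(bucketIndices(spans)) // [0 1 5 6]
}
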
@@ -205,7 +200,7 @@ func compareSpans(a, b []histogram.Span) ([]interjection, bool) {
 }
 
 // caller is responsible for making sure len(in) and len(out) are appropriate for the provided interjections!
-func interject(in, out []int64, interjections []interjection) []int64 {
+func interject(in, out []int64, interjections []Interjection) []int64 {
 	var j int      // position in out
 	var v int64    // the last value seen
 	var interj int // the next interjection to process
@@ -122,7 +122,7 @@ func TestInterjection(t *testing.T) {
 		{Offset: 1, Length: 4},
 		{Offset: 3, Length: 3},
 	}
-	interj := []interjection{
+	interj := []Interjection{
 		{
 			pos: 2,
 			num: 1,
@@ -140,13 +140,13 @@ func TestInterjection(t *testing.T) {
 	testInterject(interj, t)
 }
 
-func testCompareSpans(a, b []histogram.Span, exp []interjection, t *testing.T) {
+func testCompareSpans(a, b []histogram.Span, exp []Interjection, t *testing.T) {
 	got, ok := compareSpans(a, b)
 	require.Equal(t, true, ok)
 	require.Equal(t, exp, got)
 }
 
-func testInterject(interjections []interjection, t *testing.T) {
+func testInterject(interjections []Interjection, t *testing.T) {
 	// this tests the scenario as described in compareSpans's comments
 	// original deltas that represent these counts : 6, 3, 3, 2, 4, 5, 1
 	a := []int64{6, -3, 0, -1, 2, 1, -4}
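
To exercise just these helpers after the rename, something along the lines of the following should do; the tsdb/chunkenc directory is an assumption inferred from the package name.

go test -run TestInterjection ./tsdb/chunkenc/
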
@@ -81,6 +81,7 @@ func (c *XORChunk) NumSamples() int {
 	return int(binary.BigEndian.Uint16(c.Bytes()))
 }
 
+// Compact implements the Chunk interface.
 func (c *XORChunk) Compact() {
 	if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
 		buf := make([]byte, l)