Mirror of https://github.com/prometheus/prometheus.git

Fix staticcheck issues.
parent 0fcfe3209f
commit 59aca4138b

@@ -229,7 +229,7 @@ func Main() int {
 	webHandler.Ready()
 	log.Info("Server is Ready to receive requests.")

-	term := make(chan os.Signal)
+	term := make(chan os.Signal, 1)
 	signal.Notify(term, os.Interrupt, syscall.SIGTERM)
 	select {
 	case <-term:

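For context on the change above: staticcheck flags unbuffered channels passed to signal.Notify because Notify sends without blocking, so a signal delivered while the receiver is not yet waiting is silently dropped. Below is a minimal, self-contained sketch of the corrected pattern, not code from this repository.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Buffer of 1: a signal that arrives before the <-term below is reached
	// is kept in the channel instead of being discarded.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	<-term
	fmt.Println("shutting down")
}
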
@@ -130,7 +130,7 @@ func TestTargetURL(t *testing.T) {
 func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target {
 	labels = labels.Clone()
 	labels[model.SchemeLabel] = "http"
-	labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://"))
+	labels[model.AddressLabel] = model.LabelValue(strings.TrimPrefix(targetURL, "http://"))
 	labels[model.MetricsPathLabel] = "/metrics"

 	return &Target{

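The TrimLeft call above is flagged because strings.TrimLeft interprets its second argument as a set of characters, not a literal prefix: it keeps stripping leading characters as long as they appear anywhere in "http://", which can also eat the start of the host name. strings.TrimPrefix removes the exact prefix once. A small standalone sketch (the URL is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	u := "http://target.example:9090"

	// TrimLeft treats "http://" as the character set {h, t, p, :, /} and here
	// also strips the leading "t" of the host name.
	fmt.Println(strings.TrimLeft(u, "http://")) // arget.example:9090

	// TrimPrefix removes the literal prefix exactly once.
	fmt.Println(strings.TrimPrefix(u, "http://")) // target.example:9090
}
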
@@ -63,6 +63,9 @@ func TestFPMapper(t *testing.T) {
 	defer closer.Close()

 	mapper, err := newFPMapper(sm, p)
+	if err != nil {
+		t.Fatal(err)
+	}

 	// Everything is empty, resolving a FP should do nothing.
 	gotFP := mapper.mapFP(fp1, cm11)

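The three added lines follow the usual Go test idiom for setup errors: an error that is assigned but never read is flagged by staticcheck, and failing fast with t.Fatal stops the test before a nil or invalid value is used. A generic sketch with an illustrative constructor (newThing is not a function from this repository):

package example

import (
	"errors"
	"testing"
)

// newThing stands in for a fallible constructor such as newFPMapper above.
func newThing(fail bool) (int, error) {
	if fail {
		return 0, errors.New("setup failed")
	}
	return 42, nil
}

func TestSetupErrorIsChecked(t *testing.T) {
	v, err := newThing(false)
	if err != nil {
		t.Fatal(err) // abort immediately instead of dropping the error
	}
	if v != 42 {
		t.Errorf("got %d, want 42", v)
	}
}
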
@@ -462,7 +462,7 @@ func (p *persistence) persistChunks(fp model.Fingerprint, chunks []chunk.Chunk)
 	}

 	// Determine index within the file.
-	offset, err := f.Seek(0, os.SEEK_CUR)
+	offset, err := f.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return -1, err
 	}

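This and the following hunks replace the os.SEEK_* constants, which the os package documents as deprecated, with the io.Seek* constants introduced in Go 1.7: io.SeekStart for os.SEEK_SET, io.SeekCurrent for os.SEEK_CUR, and io.SeekEnd for os.SEEK_END. The numeric values (0, 1, 2) are identical, so the seeks behave exactly as before. A minimal sketch of the constants in use (the file name is illustrative):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// "example.dat" is an illustrative file name.
	f, err := os.Open("example.dat")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	size, err := f.Seek(0, io.SeekEnd) // formerly os.SEEK_END: offset from the end
	if err != nil {
		fmt.Println(err)
		return
	}
	pos, err := f.Seek(0, io.SeekStart) // formerly os.SEEK_SET: absolute offset
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("size=%d, now at %d; io.SeekCurrent (formerly os.SEEK_CUR) is relative\n", size, pos)
}
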
@@ -498,7 +498,7 @@ func (p *persistence) loadChunks(fp model.Fingerprint, indexes []int, indexOffse
 	// This loads chunks in batches. A batch is a streak of
 	// consecutive chunks, read from disk in one go.
 	batchSize := 1
-	if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), os.SEEK_SET); err != nil {
+	if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), io.SeekStart); err != nil {
 		return nil, err
 	}

@@ -561,7 +561,7 @@ func (p *persistence) loadChunkDescs(fp model.Fingerprint, offsetFromEnd int) ([
 	cds := make([]*chunk.Desc, numChunks)
 	chunkTimesBuf := make([]byte, 16)
 	for i := 0; i < numChunks; i++ {
-		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
+		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, io.SeekStart)
 		if err != nil {
 			return nil, err
 		}

@@ -814,7 +814,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(
 	if realNumberOfSeries != numberOfSeriesInHeader {
 		// The number of series has changed in the meantime.
 		// Rewrite it in the header.
-		if _, err = f.Seek(int64(numberOfSeriesOffset), os.SEEK_SET); err != nil {
+		if _, err = f.Seek(int64(numberOfSeriesOffset), io.SeekStart); err != nil {
 			return err
 		}
 		if err = codable.EncodeUint64(f, realNumberOfSeries); err != nil {

@@ -971,7 +971,7 @@ func (p *persistence) dropAndPersistChunks(
 	headerBuf := make([]byte, chunkHeaderLen)
 	// Find the first chunk in the file that should be kept.
 	for ; ; numDropped++ {
-		_, err = f.Seek(offsetForChunkIndex(numDropped), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(numDropped), io.SeekStart)
 		if err != nil {
 			return
 		}

@@ -1007,7 +1007,7 @@ func (p *persistence) dropAndPersistChunks(
 	if numDropped == chunkIndexToStartSeek {
 		// Nothing to drop. Just adjust the return values and append the chunks (if any).
 		numDropped = 0
-		_, err = f.Seek(offsetForChunkIndex(0), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(0), io.SeekStart)
 		if err != nil {
 			return
 		}

@@ -1033,7 +1033,7 @@ func (p *persistence) dropAndPersistChunks(
 		binary.LittleEndian.Uint64(headerBuf[chunkHeaderFirstTimeOffset:]),
 	)
 	chunk.Ops.WithLabelValues(chunk.Drop).Add(float64(numDropped))
-	_, err = f.Seek(-chunkHeaderLen, os.SEEK_CUR)
+	_, err = f.Seek(-chunkHeaderLen, io.SeekCurrent)
 	if err != nil {
 		return
 	}

@@ -1354,7 +1354,7 @@ func (p *persistence) openChunkFileForWriting(fp model.Fingerprint) (*os.File, e
 	}
 	return os.OpenFile(p.fileNameForFingerprint(fp), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
 	// NOTE: Although the file was opened for append,
-	//       f.Seek(0, os.SEEK_CUR)
+	//       f.Seek(0, io.SeekCurrent)
 	// would now return '0, nil', so we cannot check for a consistent file length right now.
 	// However, the chunkIndexForOffset function is doing that check, so a wrong file length
 	// would still be detected.

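The NOTE kept above describes real file semantics: opening with O_APPEND does not move the descriptor's offset, so Seek(0, io.SeekCurrent) reports 0 until something is written, and only then does the offset jump to the end of the file. A small sketch of that behaviour (file name illustrative, some errors elided for brevity):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.OpenFile("append.dat", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	before, _ := f.Seek(0, io.SeekCurrent) // 0, even if append.dat already has data
	f.Write([]byte("x"))                   // O_APPEND: the write itself goes to the end
	after, _ := f.Seek(0, io.SeekCurrent)  // now reports the end of the file
	fmt.Println(before, after)
}
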
@@ -136,6 +136,9 @@ func testPersistLoadDropChunks(t *testing.T, encoding chunk.Encoding) {
 	}
 	// Load all chunk descs.
 	actualChunkDescs, err := p.loadChunkDescs(fp, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
 	if len(actualChunkDescs) != 10 {
 		t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
 	}

@@ -1420,7 +1420,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunk.Encoding) {
 	// Unarchive metrics.
 	s.getOrCreateSeries(fp, model.Metric{})

-	series, ok = s.fpToSeries.get(fp)
+	_, ok = s.fpToSeries.get(fp)
 	if !ok {
 		t.Fatal("could not find series")
 	}

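Assigning to series here was flagged because the value is never read again; only the ok result matters, and the blank identifier makes that intent explicit. A generic sketch (the map stands in for the fingerprint-to-series lookup):

package main

import "fmt"

func main() {
	seriesByFP := map[uint64]string{42: "up"}

	// Only existence is checked; the value is deliberately discarded.
	_, ok := seriesByFP[42]
	if !ok {
		fmt.Println("could not find series")
		return
	}
	fmt.Println("series present")
}
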
@@ -68,6 +68,9 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
 		url:     &config.URL{serverURL},
 		timeout: model.Duration(time.Second),
 	})
+	if err != nil {
+		t.Fatal(err)
+	}

 	err = c.Store(nil)
 	if !reflect.DeepEqual(err, test.err) {