Merge remote-tracking branch 'prometheus/main' into arve/wlog-histograms

Arve Knudsen 2024-05-28 12:06:29 +02:00
commit a4942ffa8c
15 changed files with 98 additions and 43 deletions

View file

@@ -161,21 +161,20 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
cache: false
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v1.56.2
version: v1.59.0
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'

View file

@@ -47,7 +47,7 @@ issues:
source: "^// ==="
- linters:
- perfsprint
text: "fmt.Sprintf can be replaced with string addition"
text: "fmt.Sprintf can be replaced with string concatenation"
linters-settings:
depguard:
rules:

View file

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.56.2
GOLANGCI_LINT_VERSION ?= v1.59.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

View file

@@ -42,6 +42,7 @@ import (
"github.com/mwitkow/go-conntrack"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
@@ -252,6 +253,18 @@ func main() {
newFlagRetentionDuration model.Duration
)
// Unregister the default GoCollector, and reregister with our defaults.
if prometheus.Unregister(collectors.NewGoCollector()) {
prometheus.MustRegister(
collectors.NewGoCollector(
collectors.WithGoCollectorRuntimeMetrics(
collectors.MetricsGC,
collectors.MetricsScheduler,
),
),
)
}
cfg := flagConfig{
notifier: notifier.Options{
Registerer: prometheus.DefaultRegisterer,
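
The hunk above re-registers the Go collector with its runtime/metrics output restricted to the GC and scheduler rules. As a standalone sketch of the same client_golang options (the dedicated registry, port, and HTTP wiring are illustrative assumptions, not part of this change):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Use a dedicated registry so only the explicitly configured Go collector is registered.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.MetricsGC,        // GC-related runtime/metrics
			collectors.MetricsScheduler, // scheduler-related runtime/metrics
		),
	))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}

Scraping the assumed :8080 endpoint then includes the GC- and scheduler-related runtime series alongside the collector's base metrics.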

View file

@@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
}

View file

@@ -1349,7 +1349,7 @@ interface.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_openstack_address_pool`: the pool of the private IP.
* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance.
* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
* `__meta_openstack_instance_id`: the OpenStack instance ID.
* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
* `__meta_openstack_instance_name`: the OpenStack instance name.
@@ -1357,7 +1357,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_openstack_private_ip`: the private IP of the OpenStack instance.
* `__meta_openstack_project_id`: the project (tenant) owning this instance.
* `__meta_openstack_public_ip`: the public IP of the OpenStack instance.
* `__meta_openstack_tag_<tagkey>`: each tag value of the instance.
* `__meta_openstack_tag_<key>`: each metadata item of the instance, with any unsupported characters converted to an underscore.
* `__meta_openstack_user_id`: the user account owning the tenant.
See below for the configuration options for OpenStack discovery:

View file

@@ -21,6 +21,7 @@ import (
"os"
"sort"
"strconv"
"sync"
"testing"
"time"
@@ -58,7 +59,9 @@ func TestQueryConcurrency(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(dir)
queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil)
t.Cleanup(queryTracker.Close)
t.Cleanup(func() {
require.NoError(t, queryTracker.Close())
})
opts := promql.EngineOpts{
Logger: nil,
@@ -90,9 +93,14 @@ func TestQueryConcurrency(t *testing.T) {
return nil
}
var wg sync.WaitGroup
for i := 0; i < maxConcurrency; i++ {
q := engine.NewTestQuery(f)
go q.Exec(ctx)
wg.Add(1)
go func() {
q.Exec(ctx)
wg.Done()
}()
select {
case <-processing:
// Expected.
@@ -102,7 +110,11 @@ func TestQueryConcurrency(t *testing.T) {
}
q := engine.NewTestQuery(f)
go q.Exec(ctx)
wg.Add(1)
go func() {
q.Exec(ctx)
wg.Done()
}()
select {
case <-processing:
@@ -125,6 +137,8 @@ func TestQueryConcurrency(t *testing.T) {
for i := 0; i < maxConcurrency; i++ {
block <- struct{}{}
}
wg.Wait()
}
// contextDone returns an error if the context was canceled or timed out.

View file

@@ -16,6 +16,8 @@ package promql
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
@@ -36,6 +38,8 @@ type ActiveQueryTracker struct {
maxConcurrent int
}
var _ io.Closer = &ActiveQueryTracker{}
type Entry struct {
Query string `json:"query"`
Timestamp int64 `json:"timestamp_sec"`
@@ -83,6 +87,23 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
}
}
type mmapedFile struct {
f io.Closer
m mmap.MMap
}
func (f *mmapedFile) Close() error {
err := f.m.Unmap()
if err != nil {
err = fmt.Errorf("mmapedFile: unmapping: %w", err)
}
if fErr := f.f.Close(); fErr != nil {
return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
}
return err
}
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil {
@@ -108,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
return nil, nil, err
}
return fileAsBytes, file, err
return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
}
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@@ -204,9 +225,13 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int
}
}
func (tracker *ActiveQueryTracker) Close() {
// Close closes tracker.
func (tracker *ActiveQueryTracker) Close() error {
if tracker == nil || tracker.closer == nil {
return
return nil
}
tracker.closer.Close()
if err := tracker.closer.Close(); err != nil {
return fmt.Errorf("close ActiveQueryTracker.closer: %w", err)
}
return nil
}
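
With Close now returning an error, callers can report unmap and file-close failures instead of dropping them. A minimal caller sketch (the storage path, concurrency limit, and logger setup are illustrative assumptions, not code from this change):

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/promql"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	// Signature as in the file above: NewActiveQueryTracker(localStoragePath, maxConcurrent, logger).
	tracker := promql.NewActiveQueryTracker("data", 20, logger)
	defer func() {
		// Close wraps any unmap or file-close error; log it rather than ignoring it.
		if err := tracker.Close(); err != nil {
			logger.Log("msg", "closing active query tracker", "err", err)
		}
	}()
	// ... wire tracker into promql.EngineOpts.ActiveQueryTracker and run queries ...
}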

View file

@@ -16,6 +16,7 @@ package promql
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/grafana/regexp"
@@ -104,26 +105,26 @@ func TestIndexReuse(t *testing.T) {
}
func TestMMapFile(t *testing.T) {
file, err := os.CreateTemp("", "mmapedFile")
dir := t.TempDir()
fpath := filepath.Join(dir, "mmapedFile")
const data = "ab"
fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
require.NoError(t, err)
copy(fileAsBytes, data)
require.NoError(t, closer.Close())
filename := file.Name()
defer os.Remove(filename)
fileAsBytes, _, err := getMMapedFile(filename, 2, nil)
require.NoError(t, err)
copy(fileAsBytes, "ab")
f, err := os.Open(filename)
f, err := os.Open(fpath)
require.NoError(t, err)
t.Cleanup(func() {
_ = f.Close()
})
bytes := make([]byte, 4)
n, err := f.Read(bytes)
require.Equal(t, 2, n)
require.NoError(t, err, "Unexpected error while reading file.")
require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed")
require.Equal(t, 2, n)
require.Equal(t, []byte(data), bytes[:2], "Mmap failed")
}
func TestParseBrokenJSON(t *testing.T) {

View file

@@ -24,8 +24,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- name: install Go
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: 1.22.x
@@ -33,6 +33,7 @@ jobs:
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@9d1e0624a798bb64f6c3cea93db47765312263dc # v5.1.0
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with:
version: v1.56.2
args: --verbose
version: v1.59.0

View file

@@ -55,7 +55,7 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) {
require.Len(t, s.segment, s.nextWrite)
}
// Last segment must have at least one element, or we wouldn't have created it.
require.Greater(t, s.nextWrite, 0)
require.Positive(t, s.nextWrite)
}
require.Equal(t, q.size, totalSize)

View file

@@ -4574,7 +4574,7 @@ func TestOOOCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
}
checkNonEmptyOOOChunk(series1)
@@ -4715,7 +4715,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
}
// If the normal Head is not compacted, the OOO head compaction does not take place.
@@ -4816,7 +4816,7 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
require.NoError(t, err)
require.False(t, created)
require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0)
require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
}
// If the normal Head is not compacted, the OOO head compaction does not take place.
@@ -5517,8 +5517,8 @@ func TestWBLAndMmapReplay(t *testing.T) {
addedRecs++
require.NoError(t, newWbl.Log(rec))
}
require.Greater(t, markers, 0)
require.Greater(t, addedRecs, 0)
require.Positive(t, markers)
require.Positive(t, addedRecs)
require.NoError(t, newWbl.Close())
require.NoError(t, sr.Close())
require.NoError(t, os.RemoveAll(wblDir))

View file

@@ -3557,7 +3557,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
expMmapChunks = append(expMmapChunks, &cpy)
}
expHeadChunkSamples := ms.headChunks.chunk.NumSamples()
require.Greater(t, expHeadChunkSamples, 0)
require.Positive(t, expHeadChunkSamples)
// Series with mix of histograms and float.
s2 := labels.FromStrings("a", "b2")
@@ -4638,7 +4638,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, "", name)
require.Equal(t, 0, idx)
require.Greater(t, offset, 0)
require.Positive(t, offset)
}
// TestWBLReplay checks the replay at a low level.

View file

@@ -251,7 +251,7 @@ func TestCheckpoint(t *testing.T) {
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
require.Greater(t, stats.DroppedSamples, 0)
require.Positive(t, stats.DroppedSamples)
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)

View file

@@ -2973,8 +2973,10 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) {
t.Helper()
if exp == errorNone {
//nolint:testifylint
require.Nil(t, got)
} else {
//nolint:testifylint
require.NotNil(t, got)
require.Equal(t, exp, got.typ, "(%q)", got)
}