Merge remote-tracking branch 'upstream/main' into jvp/merge-prometheus-main

Jesus Vazquez · 2022-04-15 12:09:46 +02:00
commit 7106db9303
14 changed files with 246 additions and 59 deletions

.circleci/config.yml

@@ -58,13 +58,13 @@ jobs:
keys:
- v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
- v3-npm-deps-
- run: make ui-install
- run: make assets-tarball
- run: make ui-lint
- run: make ui-build-module
- run: make ui-test
- run: make assets-tarball -o assets
- store_artifacts:
path: web/ui/static.tar.gz
- persist_to_workspace:
root: .
paths:
- .tarballs
- save_cache:
key: v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
paths:
@@ -148,7 +148,7 @@ workflows:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
filters:
tags:
ignore: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
ignore: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
branches:
ignore: /^(main|release-.*|.*build-all.*)$/
- prometheus/build:
@@ -158,7 +158,7 @@ workflows:
branches:
only: /^(main|release-.*|.*build-all.*)$/
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
- prometheus/publish_main:
context: org-context
requires:
@@ -177,7 +177,7 @@ workflows:
- build_all
filters:
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
branches:
ignore: /.*/
image: circleci/golang:1-node

.github/workflows/codeql-analysis.yml (new file)

@@ -0,0 +1,40 @@
---
name: "CodeQL"
on:
push:
branches: [main, release-*]
pull_request:
branches: [main]
schedule:
- cron: "26 14 * * 1"
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write
strategy:
fail-fast: false
matrix:
language: ["go", "javascript"]
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '>=1.17 <1.18'
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

CHANGELOG.md

@@ -1,3 +1,29 @@
## 2.35.0-rc1 / 2022-04-14
* [ENHANCEMENT] Update package `go.uber.org/automaxprocs` to support `cgroups2`. #10584
* [BUGFIX] Tracing/GRPC: Set TLS credentials only when insecure is false. #10592
* [BUGFIX] Agent: Fix ID collision when loading a WAL with multiple segments. #10587
* [BUGFIX] Revert storing the target and metadata cache in the context. This fixes a memory leak introduced in `2.35.0-rc0`. #10590
## 2.35.0-rc0 / 2022-04-08
* [CHANGE] TSDB: Delete `*.tmp` WAL files when Prometheus starts. #10317
* [CHANGE] promtool: Add new flag `--lint` (enabled by default) for the commands `check rules` and `check config`, resulting in a new exit code (`3`) for linter errors. #10435
* [FEATURE] Support for automatically setting the variable `GOMAXPROCS` to the container CPU limit. Enable with the flag `--enable-feature=auto-gomaxprocs` #10498
* [FEATURE] PromQL: Extend statistics with total and peak number of samples in a query. Additionally, per-step statistics are available with the flag `--enable-feature=promql-per-step-stats` and `stats=all` in the query API (see the sketch after this list). #10369
* [ENHANCEMENT] Prometheus is built with Go 1.18. #10501
* [ENHANCEMENT] TSDB: more efficient sorting of postings read from WAL at startup. #10500
* [ENHANCEMENT] Azure SD: Add metric to track Azure SD failures #10476
* [ENHANCEMENT] Azure SD: Add an optional `resource_group` configuration. #10365
* [ENHANCEMENT] Kubernetes SD: Support `discovery.k8s.io/v1` `EndpointSlice` (previously only `discovery.k8s.io/v1beta1` `EndpointSlice` was supported). #9570
* [ENHANCEMENT] Kubernetes SD: Allow attaching node metadata to discovered pods. #10080
* [ENHANCEMENT] OAuth2: Support for using a proxy URL to fetch OAuth2 tokens. #10492
* [ENHANCEMENT] Configuration: Add the ability to disable HTTP2. #10492
* [BUGFIX] Kubernetes SD: Explicitly include gcp auth from k8s.io. #10516
* [BUGFIX] Fix OpenMetrics parser to sort uppercase labels correctly. #10510
* [BUGFIX] UI: Fix scrape interval and duration tooltip not showing on target page. #10545
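As a rough illustration of the per-step statistics entry above (a sketch only: it assumes a local Prometheus started with `--enable-feature=promql-per-step-stats` and listening on the default `localhost:9090`, neither of which this changelog prescribes), a query can request the extended statistics with `stats=all`:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumes a local Prometheus started with --enable-feature=promql-per-step-stats.
	params := url.Values{"query": {"up"}, "stats": {"all"}}
	resp, err := http.Get("http://localhost:9090/api/v1/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// With stats=all, the JSON response includes total and peak sample counts.
	fmt.Println(string(body))
}
```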
## 2.34.0 / 2022-03-15
* [CHANGE] UI: Classic UI removed. #10208

Makefile

@@ -68,7 +68,7 @@ assets-compress: assets
scripts/compress_assets.sh
.PHONY: assets-tarball
assets-tarball: assets-compress
assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh

README.md

@@ -122,6 +122,37 @@ You can build a docker image locally with the following commands:
*NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar).
## Using Prometheus as a Go Library
### Remote Write
We are publishing our Remote Write protobuf independently at
[buf.build](https://buf.build/prometheus/prometheus/assets).
You can use that as a library:
```shell
$ go get go.buf.build/protocolbuffers/go/prometheus/prometheus
```
This is experimental.
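A minimal sketch of pulling that module into a Go program (the generated package name and the pointer-based field layout below are assumptions based on standard protoc-gen-go output for the remote-write proto, not something this README documents):

```go
package main

import (
	"fmt"

	// Fetched via: go get go.buf.build/protocolbuffers/go/prometheus/prometheus
	// The package name is assumed; adjust to whatever the generated code declares.
	prometheuspb "go.buf.build/protocolbuffers/go/prometheus/prometheus"
)

func main() {
	// Build a remote-write request with a single series and sample.
	req := &prometheuspb.WriteRequest{
		Timeseries: []*prometheuspb.TimeSeries{{
			Labels:  []*prometheuspb.Label{{Name: "__name__", Value: "example_metric"}},
			Samples: []*prometheuspb.Sample{{Value: 42, Timestamp: 1650000000000}},
		}},
	}
	fmt.Printf("write request with %d series\n", len(req.Timeseries))
}
```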
### Prometheus code base
In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
Prometheus release numbers do not exactly match Go module releases. For the
Prometheus v2.y.z releases, we publish equivalent v0.y.z tags.
Therefore, a user who wants to use Prometheus v2.35.0 as a library can do:
```shell
$ go get github.com/prometheus/prometheus@v0.35.0
```
This solution makes it clear that we might break our internal Go APIs between
minor user-facing releases, as [breaking changes are allowed in major version
zero](https://semver.org/#spec-item-4).
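To make library usage concrete, here is a small sketch; the `promql/parser` package is just one illustrative entry point, not something this section prescribes:

```go
package main

import (
	"fmt"

	// Resolved by: go get github.com/prometheus/prometheus@v0.35.0
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`rate(http_requests_total{job="api"}[5m])`)
	if err != nil {
		panic(err)
	}
	// Print the normalized form of the parsed PromQL expression.
	fmt.Println(expr.String())
}
```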
## React UI Development
For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/README.md).

RELEASE.md

@@ -147,6 +147,18 @@ $ git tag -s "${tag}" -m "${tag}"
$ git push origin "${tag}"
```
Go module versioning requires strict use of semver. Because we do not commit to
avoiding code-level breaking changes in the libraries between minor releases of
the Prometheus server, we use major version zero releases for the libraries; for
example, server version `2.35.0` maps to the library tag `v0.35.0`.
Tag the new library release via the following commands:
```bash
$ tag="v$(sed s/2/0/ < VERSION)"
$ git tag -s "${tag}" -m "${tag}"
$ git push origin "${tag}"
```
Optionally, you can use this handy `.gitconfig` alias.
```ini

VERSION

@@ -1 +1 @@
2.34.0
2.35.0-rc1

go.mod

@@ -63,7 +63,7 @@ require (
go.opentelemetry.io/otel/sdk v1.6.1
go.opentelemetry.io/otel/trace v1.6.1
go.uber.org/atomic v1.9.0
go.uber.org/automaxprocs v1.4.0
go.uber.org/automaxprocs v1.5.1
go.uber.org/goleak v1.1.12
golang.org/x/net v0.0.0-20220325170049-de3da57026de
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
@@ -124,3 +124,69 @@ exclude (
k8s.io/client-go v9.0.0+incompatible
k8s.io/client-go v9.0.0-invalid+incompatible
)
retract (
v2.5.0+incompatible
v2.5.0-rc.2+incompatible
v2.5.0-rc.1+incompatible
v2.5.0-rc.0+incompatible
v2.4.3+incompatible
v2.4.2+incompatible
v2.4.1+incompatible
v2.4.0+incompatible
v2.4.0-rc.0+incompatible
v2.3.2+incompatible
v2.3.1+incompatible
v2.3.0+incompatible
v2.2.1+incompatible
v2.2.0+incompatible
v2.2.0-rc.1+incompatible
v2.2.0-rc.0+incompatible
v2.1.0+incompatible
v2.0.0+incompatible
v2.0.0-rc.3+incompatible
v2.0.0-rc.2+incompatible
v2.0.0-rc.1+incompatible
v2.0.0-rc.0+incompatible
v2.0.0-beta.5+incompatible
v2.0.0-beta.4+incompatible
v2.0.0-beta.3+incompatible
v2.0.0-beta.2+incompatible
v2.0.0-beta.1+incompatible
v2.0.0-beta.0+incompatible
v2.0.0-alpha.3+incompatible
v2.0.0-alpha.2+incompatible
v2.0.0-alpha.1+incompatible
v2.0.0-alpha.0+incompatible
v1.8.2
v1.8.1
v1.8.0
v1.7.2
v1.7.1
v1.7.0
v1.6.3
v1.6.2
v1.6.1
v1.6.0
v1.5.3
v1.5.2
v1.5.1
v1.5.0
v1.4.1
v1.4.0
v1.3.1
v1.3.0
v1.3.0-beta.0
v1.2.3
v1.2.2
v1.2.1
v1.2.0
v1.1.3
v1.1.2
v1.1.1
v1.1.0
v1.0.2
v1.0.1
v1.0.0
v1.0.0-rc.0
)

go.sum

@@ -1026,6 +1026,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw=
github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -1294,8 +1296,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=

scrape/scrape.go

@@ -298,12 +298,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
}
opts.target.SetMetadataStore(cache)
// Store the cache in the context.
loopCtx := ContextWithMetricMetadataStore(ctx, cache)
loopCtx = ContextWithTarget(loopCtx, opts.target)
return newScrapeLoop(
loopCtx,
ctx,
opts.scraper,
log.With(logger, "target", opts.target),
buffers,
@@ -619,7 +615,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
if limits.labelLimit > 0 {
nbLabels := len(lset)
if nbLabels > int(limits.labelLimit) {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of label: %d, limit: %d)", met, nbLabels, limits.labelLimit)
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
}
}
@@ -631,14 +627,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) {
return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label: %.50v, name length: %d, limit: %d)", met, l, nameLength, limits.labelNameLengthLimit)
return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
}
}
if limits.labelValueLengthLimit > 0 {
valueLength := len(l.Value)
if valueLength > int(limits.labelValueLengthLimit) {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label: %.50v, value length: %d, limit: %d)", met, l, valueLength, limits.labelValueLengthLimit)
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
}
}
}
@@ -1379,7 +1375,8 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
// Call sl.append again with an empty scrape to trigger stale markers.
// If the target has since been recreated and scraped, the
// stale markers will be out of order and ignored.
app := sl.appender(sl.ctx)
// sl.context would have been cancelled, hence using sl.parentCtx.
app := sl.appender(sl.parentCtx)
var err error
defer func() {
if err != nil {
@@ -1393,7 +1390,7 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
}()
if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
app.Rollback()
app = sl.appender(sl.ctx)
app = sl.appender(sl.parentCtx)
level.Warn(sl.l).Log("msg", "Stale append failed", "err", err)
}
if err = sl.reportStale(app, staleTime); err != nil {
@@ -1795,31 +1792,3 @@ func reusableCache(r, l *config.ScrapeConfig) bool {
}
return reflect.DeepEqual(zeroConfig(r), zeroConfig(l))
}
// CtxKey is a dedicated type for keys of context-embedded values propagated
// with the scrape context.
type ctxKey int
// Valid CtxKey values.
const (
ctxKeyMetadata ctxKey = iota + 1
ctxKeyTarget
)
func ContextWithMetricMetadataStore(ctx context.Context, s MetricMetadataStore) context.Context {
return context.WithValue(ctx, ctxKeyMetadata, s)
}
func MetricMetadataStoreFromContext(ctx context.Context) (MetricMetadataStore, bool) {
s, ok := ctx.Value(ctxKeyMetadata).(MetricMetadataStore)
return s, ok
}
func ContextWithTarget(ctx context.Context, t *Target) context.Context {
return context.WithValue(ctx, ctxKeyTarget, t)
}
func TargetFromContext(ctx context.Context) (*Target, bool) {
t, ok := ctx.Value(ctxKeyTarget).(*Target)
return t, ok
}

scripts/package_assets.sh

@@ -4,5 +4,7 @@
set -euo pipefail
version="$(< VERSION)"
mkdir -p .tarballs
cd web/ui
find static -type f -not -name '*.gz' -print0 | xargs -0 tar czf static.tar.gz
find static -type f -not -name '*.gz' -print0 | xargs -0 tar czf ../../.tarballs/prometheus-web-ui-${version}.tar.gz

tracing/tracing.go

@@ -183,6 +183,14 @@ func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure {
opts = append(opts, otlptracegrpc.WithInsecure())
} else {
// Use of TLS Credentials forces the use of TLS. Therefore it can
// only be set when `insecure` is set to false.
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
}
if tracingCfg.Compression != "" {
opts = append(opts, otlptracegrpc.WithCompressor(tracingCfg.Compression))
@@ -194,12 +202,6 @@ func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
opts = append(opts, otlptracegrpc.WithTimeout(time.Duration(tracingCfg.Timeout)))
}
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
client = otlptracegrpc.NewClient(opts...)
case config.TracingClientHTTP:
opts := []otlptracehttp.Option{otlptracehttp.WithEndpoint(tracingCfg.Endpoint)}

tsdb/agent/db.go

@@ -392,7 +392,7 @@ func (db *DB) replayWAL() error {
func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
var (
dec record.Decoder
lastRef chunks.HeadSeriesRef
lastRef = chunks.HeadSeriesRef(db.nextRef.Load())
decoded = make(chan interface{}, 10)
errCh = make(chan error, 1)

tsdb/agent/db_test.go

@@ -15,6 +15,7 @@ package agent
import (
"context"
"fmt"
"path/filepath"
"strconv"
"testing"
@@ -24,6 +25,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar"
@@ -409,6 +411,41 @@ func TestLockfile(t *testing.T) {
})
}
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil)
defer func() {
require.NoError(t, rs.Close())
}()
db, err := Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
seriesCount := 10
// Append <seriesCount> series
app := db.Appender(context.Background())
for i := 0; i < seriesCount; i++ {
lset := labels.Labels{
{Name: model.MetricNameLabel, Value: fmt.Sprintf("series_%d", i)},
}
_, err := app.Append(0, lset, 0, 100)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Truncate the WAL to force creation of a new segment.
require.NoError(t, db.truncate(0))
require.NoError(t, db.Close())
// Create a new storage and see what nextRef is initialized to.
db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
require.Equal(t, uint64(seriesCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
}
func startTime() (int64, error) {
return time.Now().Unix() * 1000, nil
}