Merge branch 'openstack-loadbalancer-discovery' of github.com:paulojmdias/prometheus into openstack-loadbalancer-discovery

Signed-off-by: Paulo Dias <paulodias.gm@gmail.com>
Paulo Dias 2025-01-02 14:43:46 +00:00
commit d40e99c2ec
GPG key ID: CE1937300CB848AA
42 changed files with 855 additions and 368 deletions

View file

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
- uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
- uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@ -80,7 +80,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
go-version: 1.23.x
- run: |
@ -171,7 +171,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
cache: false
go-version: 1.23.x
@ -184,7 +184,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies
@ -247,7 +247,7 @@ jobs:
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}

View file

@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Initialize CodeQL
uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5

View file

@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10
uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # tag=v3.27.5
with:
sarif_file: results.sarif

View file

@ -1429,8 +1429,9 @@ var (
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.

View file

@ -1554,6 +1554,20 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
})
}
func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) {
t.Run("good config", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.True(t, got.OTLPConfig.KeepIdentifyingResourceAttributes)
})
}
func TestOTLPAllowUTF8(t *testing.T) {
t.Run("good config", func(t *testing.T) {
fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")

View file

@ -0,0 +1,2 @@
otlp:
keep_identifying_resource_attributes: true

View file

@ -97,6 +97,7 @@ func makeEndpoints() *v1.Endpoints {
}
func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -151,6 +152,7 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
}
func TestEndpointsDiscoveryAdd(t *testing.T) {
t.Parallel()
obj := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
@ -276,6 +278,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
}
func TestEndpointsDiscoveryDelete(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())
k8sDiscoveryTest{
@ -294,6 +297,7 @@ func TestEndpointsDiscoveryDelete(t *testing.T) {
}
func TestEndpointsDiscoveryUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())
k8sDiscoveryTest{
@ -365,6 +369,7 @@ func TestEndpointsDiscoveryUpdate(t *testing.T) {
}
func TestEndpointsDiscoveryEmptySubsets(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())
k8sDiscoveryTest{
@ -393,6 +398,7 @@ func TestEndpointsDiscoveryEmptySubsets(t *testing.T) {
}
func TestEndpointsDiscoveryWithService(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())
k8sDiscoveryTest{
@ -458,6 +464,7 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
}
func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())
k8sDiscoveryTest{
@ -538,6 +545,7 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
}
func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
t.Parallel()
metadataConfig := AttachMetadataConfig{Node: true}
nodeLabels1 := map[string]string{"az": "us-east1"}
nodeLabels2 := map[string]string{"az": "us-west2"}
@ -611,6 +619,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
}
func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
t.Parallel()
nodeLabels1 := map[string]string{"az": "us-east1"}
nodeLabels2 := map[string]string{"az": "us-west2"}
node1 := makeNode("foobar", "", "", nodeLabels1, nil)
@ -688,6 +697,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
}
func TestEndpointsDiscoveryNamespaces(t *testing.T) {
t.Parallel()
epOne := makeEndpoints()
epOne.Namespace = "ns1"
objs := []runtime.Object{
@ -839,6 +849,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
}
func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
t.Parallel()
epOne := makeEndpoints()
epOne.Namespace = "own-ns"
@ -933,6 +944,7 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
}
func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
t.Parallel()
ep := makeEndpoints()
ep.Namespace = "ns"
@ -978,6 +990,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
t.Parallel()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
@ -1097,6 +1110,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
}
func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
t.Parallel()
objs := []runtime.Object{
&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{

View file

@ -21,6 +21,7 @@ import (
)
func Test_EndpointSliceAdaptor_v1(t *testing.T) {
t.Parallel()
endpointSlice := makeEndpointSliceV1()
adaptor := newEndpointSliceAdaptorFromV1(endpointSlice)

View file

@ -114,6 +114,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
}
func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})
k8sDiscoveryTest{
@ -195,6 +196,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
}
func TestEndpointSliceDiscoveryAdd(t *testing.T) {
t.Parallel()
obj := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
@ -322,6 +324,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
}
func TestEndpointSliceDiscoveryDelete(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
@ -340,6 +343,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
}
func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
@ -396,6 +400,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
}
func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
@ -424,6 +429,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
}
func TestEndpointSliceDiscoveryWithService(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
@ -516,6 +522,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
}
func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())
k8sDiscoveryTest{
@ -623,6 +630,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
}
func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
t.Parallel()
metadataConfig := AttachMetadataConfig{Node: true}
nodeLabels1 := map[string]string{"az": "us-east1"}
nodeLabels2 := map[string]string{"az": "us-west2"}
@ -722,6 +730,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
}
func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
t.Parallel()
metadataConfig := AttachMetadataConfig{Node: true}
nodeLabels1 := map[string]string{"az": "us-east1"}
nodeLabels2 := map[string]string{"az": "us-west2"}
@ -827,6 +836,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
}
func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
t.Parallel()
epOne := makeEndpointSliceV1()
epOne.Namespace = "ns1"
objs := []runtime.Object{
@ -1003,6 +1013,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
}
func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
t.Parallel()
epOne := makeEndpointSliceV1()
epOne.Namespace = "own-ns"
@ -1123,6 +1134,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
}
func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
t.Parallel()
ep := makeEndpointSliceV1()
ep.Namespace = "ns"
@ -1169,6 +1181,7 @@ func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
// sets up indexing for the main Kube informer only when needed.
// See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817
func TestEndpointSliceInfIndexersCount(t *testing.T) {
t.Parallel()
tests := []struct {
name string
withNodeMetadata bool
@ -1179,6 +1192,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
var (
n *Discovery
mainInfIndexersCount int
@ -1204,6 +1218,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {
}
func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) {
t.Parallel()
objs := []runtime.Object{
&v1.EndpointSlice{
ObjectMeta: metav1.ObjectMeta{

View file

@ -144,6 +144,7 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group
}
func TestIngressDiscoveryAdd(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
k8sDiscoveryTest{
@ -158,6 +159,7 @@ func TestIngressDiscoveryAdd(t *testing.T) {
}
func TestIngressDiscoveryAddTLS(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
k8sDiscoveryTest{
@ -172,6 +174,7 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
}
func TestIngressDiscoveryAddMixed(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})
k8sDiscoveryTest{
@ -186,6 +189,7 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
}
func TestIngressDiscoveryNamespaces(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
expected := expectedTargetGroups("ns1", TLSNo)
@ -207,6 +211,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
}
func TestIngressDiscoveryOwnNamespace(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})
expected := expectedTargetGroups("own-ns", TLSNo)

View file

@ -273,6 +273,7 @@ func (s *Service) hasSynced() bool {
}
func TestRetryOnError(t *testing.T) {
t.Parallel()
for _, successAt := range []int{1, 2, 3} {
var called int
f := func() error {
@ -288,6 +289,7 @@ func TestRetryOnError(t *testing.T) {
}
func TestFailuresCountMetric(t *testing.T) {
t.Parallel()
tests := []struct {
role Role
minFailedWatches int
@ -324,6 +326,7 @@ func TestFailuresCountMetric(t *testing.T) {
}
func TestNodeName(t *testing.T) {
t.Parallel()
node := &apiv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",

View file

@ -56,6 +56,7 @@ func makeEnumeratedNode(i int) *v1.Node {
}
func TestNodeDiscoveryBeforeStart(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -95,6 +96,7 @@ func TestNodeDiscoveryBeforeStart(t *testing.T) {
}
func TestNodeDiscoveryAdd(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -124,6 +126,7 @@ func TestNodeDiscoveryAdd(t *testing.T) {
}
func TestNodeDiscoveryDelete(t *testing.T) {
t.Parallel()
obj := makeEnumeratedNode(0)
n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj)
@ -142,6 +145,7 @@ func TestNodeDiscoveryDelete(t *testing.T) {
}
func TestNodeDiscoveryUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})
k8sDiscoveryTest{

View file

@ -239,6 +239,7 @@ func expectedPodTargetGroupsWithNodeMeta(ns, nodeName string, nodeLabels map[str
}
func TestPodDiscoveryBeforeRun(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -302,6 +303,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
}
func TestPodDiscoveryInitContainer(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{})
ns := "default"
@ -329,6 +331,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) {
}
func TestPodDiscoveryAdd(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -343,6 +346,7 @@ func TestPodDiscoveryAdd(t *testing.T) {
}
func TestPodDiscoveryDelete(t *testing.T) {
t.Parallel()
obj := makePods()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj)
@ -362,6 +366,7 @@ func TestPodDiscoveryDelete(t *testing.T) {
}
func TestPodDiscoveryUpdate(t *testing.T) {
t.Parallel()
obj := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
@ -403,6 +408,7 @@ func TestPodDiscoveryUpdate(t *testing.T) {
}
func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{})
initialPod := makePods()
@ -427,6 +433,7 @@ func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) {
}
func TestPodDiscoveryNamespaces(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
expected := expectedPodTargetGroups("ns1")
@ -448,6 +455,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) {
}
func TestPodDiscoveryOwnNamespace(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true})
expected := expectedPodTargetGroups("own-ns")
@ -466,6 +474,7 @@ func TestPodDiscoveryOwnNamespace(t *testing.T) {
}
func TestPodDiscoveryWithNodeMetadata(t *testing.T) {
t.Parallel()
attachMetadata := AttachMetadataConfig{Node: true}
n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata)
nodeLbls := map[string]string{"l1": "v1"}
@ -485,6 +494,7 @@ func TestPodDiscoveryWithNodeMetadata(t *testing.T) {
}
func TestPodDiscoveryWithNodeMetadataUpdateNode(t *testing.T) {
t.Parallel()
nodeLbls := map[string]string{"l2": "v2"}
attachMetadata := AttachMetadataConfig{Node: true}
n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata)

View file

@ -118,6 +118,7 @@ func makeLoadBalancerService() *v1.Service {
}
func TestServiceDiscoveryAdd(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{})
k8sDiscoveryTest{
@ -189,6 +190,7 @@ func TestServiceDiscoveryAdd(t *testing.T) {
}
func TestServiceDiscoveryDelete(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())
k8sDiscoveryTest{
@ -207,6 +209,7 @@ func TestServiceDiscoveryDelete(t *testing.T) {
}
func TestServiceDiscoveryUpdate(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())
k8sDiscoveryTest{
@ -251,6 +254,7 @@ func TestServiceDiscoveryUpdate(t *testing.T) {
}
func TestServiceDiscoveryNamespaces(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
k8sDiscoveryTest{
@ -303,6 +307,7 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
}
func TestServiceDiscoveryOwnNamespace(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true})
k8sDiscoveryTest{
@ -338,6 +343,7 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) {
}
func TestServiceDiscoveryAllNamespaces(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(RoleService, NamespaceDiscovery{})
k8sDiscoveryTest{

View file

@ -127,19 +127,37 @@ func (m *SDMock) HandleServiceHashiCupsGet() {
}
func TestConfiguredService(t *testing.T) {
conf := &SDConfig{
Server: "http://localhost:4646",
testCases := []struct {
name string
server string
acceptedURL bool
}{
{"invalid hostname URL", "http://foo.bar:4646", true},
{"invalid even though accepted by parsing", "foo.bar:4646", true},
{"valid address URL", "http://172.30.29.23:4646", true},
{"invalid URL", "172.30.29.23:4646", false},
}
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
conf := &SDConfig{
Server: tc.server,
}
_, err := NewDiscovery(conf, nil, metrics)
require.NoError(t, err)
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
metrics.Unregister()
_, err := NewDiscovery(conf, nil, metrics)
if tc.acceptedURL {
require.NoError(t, err)
} else {
require.Error(t, err)
}
})
}
}
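Because the removed single-case body and the new table-driven version are interleaved above without +/- markers, the refactored Nomad test is easier to follow reconstructed in one piece. The sketch below is assembled from the added lines in the hunk; the per-subtest registry setup and the placement of the deferred metrics.Unregister() are assumptions about how those pieces fit together, not a verbatim copy of the file.

// Sketch of the refactored test, reconstructed from the hunk above.
func TestConfiguredService(t *testing.T) {
	testCases := []struct {
		name        string
		server      string
		acceptedURL bool
	}{
		{"invalid hostname URL", "http://foo.bar:4646", true},
		{"invalid even though accepted by parsing", "foo.bar:4646", true},
		{"valid address URL", "http://172.30.29.23:4646", true},
		{"invalid URL", "172.30.29.23:4646", false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conf := &SDConfig{
				Server: tc.server,
			}

			// Each case builds its own registry and discoverer metrics so the
			// subtests do not share state (assumed placement of Unregister).
			reg := prometheus.NewRegistry()
			refreshMetrics := discovery.NewRefreshMetrics(reg)
			metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
			require.NoError(t, metrics.Register())
			defer metrics.Unregister()

			_, err := NewDiscovery(conf, nil, metrics)
			if tc.acceptedURL {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
		})
	}
}

Read this way, the change both exercises which forms of the `server` setting are accepted as URLs (plain host:port without a scheme is now rejected) and keeps each case's metrics registration independent.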
func TestNomadSDRefresh(t *testing.T) {

View file

@ -182,6 +182,10 @@ otlp:
# It preserves all special characters like dots, but it still adds required suffixes
# for units and _total like in UnderscoreEscapingWithSuffixes.
[ translation_strategy: <string> | default = "UnderscoreEscapingWithSuffixes" ]
# Enables adding "service.name", "service.namespace" and "service.instance.id"
# resource attributes to the "target_info" metric, on top of converting
# them into the "instance" and "job" labels.
[ keep_identifying_resource_attributes: <boolean> | default = false ]
# Settings related to the remote read feature.
remote_read:
@ -2146,7 +2150,8 @@ The following meta labels are available on targets during [relabeling](#relabel_
[ namespace: <string> | default = default ]
[ refresh_interval: <duration> | default = 60s ]
[ region: <string> | default = global ]
[ server: <host> ]
# The URL to connect to the API.
[ server: <string> ]
[ tag_separator: <string> | default = ,]
# HTTP client settings, including authentication methods (such as basic auth and

View file

@ -262,6 +262,9 @@ process ID.
## Shutting down your instance gracefully.
While Prometheus does have recovery mechanisms in the case that there is an
abrupt process failure it is recommend to use the `SIGTERM` signal to cleanly
shutdown a Prometheus instance. If you're running on Linux this can be performed
by using `kill -s SIGTERM <PID>`, replacing `<PID>` with your Prometheus process ID.
abrupt process failure it is recommended to use signals or interrupts for a
clean shutdown of a Prometheus instance. On Linux, this can be done by sending
the `SIGTERM` or `SIGINT` signals to the Prometheus process. For example, you
can use `kill -s <SIGNAL> <PID>`, replacing `<SIGNAL>` with the signal name
and `<PID>` with the Prometheus process ID. Alternatively, you can press the
interrupt character at the controlling terminal, which by default is `^C` (Control-C).

View file

@ -115,13 +115,12 @@ time series you scrape (fewer targets or fewer series per target), or you
can increase the scrape interval. However, reducing the number of series is
likely more effective, due to compression of samples within a series.
If your local storage becomes corrupted for whatever reason, the best
strategy to address the problem is to shut down Prometheus then remove the
entire storage directory. You can also try removing individual block directories,
or the WAL directory to resolve the problem. Note that this means losing
approximately two hours data per block directory. Again, Prometheus's local
storage is not intended to be durable long-term storage; external solutions
offer extended retention and data durability.
If your local storage becomes corrupted to the point where Prometheus will not
start, it is recommended to back up the storage directory and restore the
corrupted block directories from your backups. If you do not have backups, the
last resort is to remove the corrupted files. For example, you can try removing
individual block directories or the write-ahead log (WAL) files. Note that this
means losing the data for the time range those blocks or the WAL cover.
CAUTION: Non-POSIX compliant filesystems are not supported for Prometheus'
local storage as unrecoverable corruptions may happen. NFS filesystems

View file

@ -6,11 +6,11 @@ require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.7
github.com/influxdata/influxdb v1.11.8
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.1
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.10.0
)
require (

View file

@ -166,8 +166,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MI
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.7 h1:C31A+S9YfjTCOuAv9Qs0ZdQufslOZZBtejjxiV8QNQw=
github.com/influxdata/influxdb v1.11.7/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
github.com/influxdata/influxdb v1.11.8 h1:lX8MJDfk91O7nqzzonQkjk87gOeQy9V/Xp3gpELhG1s=
github.com/influxdata/influxdb v1.11.8/go.mod h1:zRTAuk/Ie/V1LGxJUv8jfDmfv+ypz22lxfhc1MxC3rI=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -293,8 +293,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=

go.mod (64 changed lines)
View file

@ -17,7 +17,7 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.128.0
github.com/digitalocean/godo v1.131.0
github.com/docker/docker v27.3.1+incompatible
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane v0.13.1
@ -36,12 +36,12 @@ require (
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.15.0
github.com/hetznercloud/hcloud-go/v2 v2.17.0
github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.11
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.42.0
github.com/linode/linodego v1.43.0
github.com/miekg/dns v1.1.62
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -58,36 +58,36 @@ require (
github.com/prometheus/sigv4 v0.1.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/stretchr/testify v1.10.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.18.0
go.opentelemetry.io/collector/semconv v0.112.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
go.opentelemetry.io/otel/sdk v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.opentelemetry.io/collector/pdata v1.20.0
go.opentelemetry.io/collector/semconv v0.114.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0
go.opentelemetry.io/otel v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
go.opentelemetry.io/otel/sdk v1.32.0
go.opentelemetry.io/otel/trace v1.32.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/oauth2 v0.23.0
golang.org/x/sync v0.8.0
golang.org/x/sys v0.26.0
golang.org/x/text v0.19.0
golang.org/x/tools v0.26.0
golang.org/x/oauth2 v0.24.0
golang.org/x/sync v0.9.0
golang.org/x/sys v0.27.0
golang.org/x/text v0.20.0
golang.org/x/tools v0.27.0
google.golang.org/api v0.204.0
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.35.1
google.golang.org/protobuf v1.35.2
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.2
k8s.io/apimachinery v0.31.2
k8s.io/client-go v0.31.2
k8s.io/api v0.31.3
k8s.io/apimachinery v0.31.3
k8s.io/client-go v0.31.3
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1
)
@ -127,7 +127,7 @@ require (
github.com/go-openapi/spec v0.20.14 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-openapi/validate v0.23.0 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
github.com/go-resty/resty/v2 v2.15.3 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/glog v1.2.2 // indirect
@ -140,7 +140,7 @@ require (
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@ -185,15 +185,15 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.32.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/crypto v0.29.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.31.0 // indirect
golang.org/x/term v0.26.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect

go.sum (159 changed lines)
View file

@ -91,8 +91,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.128.0 h1:cGn/ibMSRZ9+8etbzMv2MnnCEPTTGlEnx3HHTPwdk1U=
github.com/digitalocean/godo v1.128.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/digitalocean/godo v1.131.0 h1:0WHymufAV5avpodT0h5/pucUVfO4v7biquOIqhLeROY=
github.com/digitalocean/godo v1.131.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
@ -160,8 +160,8 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8=
github.com/go-resty/resty/v2 v2.15.3/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@ -235,8 +235,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
@ -287,8 +287,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.15.0 h1:6mpMJ/RuX1woZj+MCJdyKNEX9129KDkEIDeeyfr4GD4=
github.com/hetznercloud/hcloud-go/v2 v2.15.0/go.mod h1:h8sHav+27Xa+48cVMAvAUMELov5h298Ilg2vflyTHgg=
github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3wLzezplqR68=
github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
@ -329,8 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.42.0 h1:ZSbi4MtvwrfB9Y6bknesorvvueBGGilcmh2D5dq76RM=
github.com/linode/linodego v1.42.0/go.mod h1:2yzmY6pegPBDgx2HDllmt0eIk2IlzqcgK6NR0wFCFRY=
github.com/linode/linodego v1.43.0 h1:sGeBB3caZt7vKBoPS5p4AVzmlG4JoqQOdigIibx3egk=
github.com/linode/linodego v1.43.0/go.mod h1:n4TMFu1UVNala+icHqrTEFFaicYSF74cSAUG5zkTwfA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -487,8 +487,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
@ -498,33 +499,32 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.18.0 h1:/yg2rO2dxqDM2p6GutsMCxXN6sKlXwyIz/ZYyUPONBg=
go.opentelemetry.io/collector/pdata v1.18.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/collector/semconv v0.112.0 h1:JPQyvZhlNLVSuVI+FScONaiFygB7h7NTZceUEKIQUEc=
go.opentelemetry.io/collector/semconv v0.112.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM=
go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/collector/semconv v0.114.0 h1:/eKcCJwZepQUtEuFuxa0thx2XIOvhFpaf214ZG1a11k=
go.opentelemetry.io/collector/semconv v0.114.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -540,12 +540,9 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@ -554,10 +551,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -575,17 +570,12 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -593,10 +583,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -622,39 +610,22 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -667,10 +638,8 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -683,10 +652,10 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@ -704,8 +673,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -733,12 +702,12 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=

View file

@ -1400,27 +1400,41 @@ func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
floats := vals[0].(Matrix)[0].Floats
histograms := vals[0].(Matrix)[0].Histograms
resets := 0
if len(floats) > 1 {
prev := floats[0].F
for _, sample := range floats[1:] {
current := sample.F
if current < prev {
resets++
}
prev = current
}
if len(floats) == 0 && len(histograms) == 0 {
return enh.Out, nil
}
if len(histograms) > 1 {
prev := histograms[0].H
for _, sample := range histograms[1:] {
current := sample.H
if current.DetectReset(prev) {
var prevSample, curSample Sample
for iFloat, iHistogram := 0, 0; iFloat < len(floats) || iHistogram < len(histograms); {
switch {
// Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier.
case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
curSample.F = floats[iFloat].F
curSample.H = nil
iFloat++
case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
curSample.H = histograms[iHistogram].H
iHistogram++
}
// Skip the comparison for the first sample, just initialize prevSample.
if iFloat+iHistogram == 1 {
prevSample = curSample
continue
}
switch {
case prevSample.H == nil && curSample.H == nil:
if curSample.F < prevSample.F {
resets++
}
case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
resets++
case prevSample.H != nil && curSample.H != nil:
if curSample.H.DetectReset(prevSample.H) {
resets++
}
prev = current
}
prevSample = curSample
}
return append(enh.Out, Sample{F: float64(resets)}), nil
@ -1441,16 +1455,12 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp
// Process a float sample if no histogram sample remains or its timestamp is earlier.
// Process a histogram sample if no float sample remains or its timestamp is earlier.
case iHistogram >= len(histograms) || iFloat < len(floats) && floats[iFloat].T < histograms[iHistogram].T:
{
curSample.F = floats[iFloat].F
curSample.H = nil
iFloat++
}
curSample.F = floats[iFloat].F
curSample.H = nil
iFloat++
case iFloat >= len(floats) || iHistogram < len(histograms) && floats[iFloat].T > histograms[iHistogram].T:
{
curSample.H = histograms[iHistogram].H
iHistogram++
}
curSample.H = histograms[iHistogram].H
iHistogram++
}
// Skip the comparison for the first sample, just initialize prevSample.
if iFloat+iHistogram == 1 {
@ -1459,20 +1469,14 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp
}
switch {
case prevSample.H == nil && curSample.H == nil:
{
if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) {
changes++
}
}
case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
{
if curSample.F != prevSample.F && !(math.IsNaN(curSample.F) && math.IsNaN(prevSample.F)) {
changes++
}
case prevSample.H != nil && curSample.H == nil, prevSample.H == nil && curSample.H != nil:
changes++
case prevSample.H != nil && curSample.H != nil:
{
if !curSample.H.Equals(prevSample.H) {
changes++
}
if !curSample.H.Equals(prevSample.H) {
changes++
}
}
prevSample = curSample

View file

@ -66,7 +66,7 @@ var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements.
// Non-load statements will cause test errors.
func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
test, err := newTest(t, input)
test, err := newTest(t, input, false)
require.NoError(t, err)
for _, cmd := range test.cmds {
@ -125,11 +125,17 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
// RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine promql.QueryEngine) {
require.NoError(t, runTest(t, input, engine))
require.NoError(t, runTest(t, input, engine, false))
}
func runTest(t testutil.T, input string, engine promql.QueryEngine) error {
test, err := newTest(t, input)
// testTest allows tests to be run in "test-the-test" mode (true for
// testingMode). This is a special mode for testing test code execution itself.
func testTest(t testutil.T, input string, engine promql.QueryEngine) error {
return runTest(t, input, engine, true)
}
func runTest(t testutil.T, input string, engine promql.QueryEngine, testingMode bool) error {
test, err := newTest(t, input, testingMode)
// Why do this before checking err? newTest() can create the test storage and then return an error,
// and we want to make sure to clean that up to avoid leaking goroutines.
@ -164,6 +170,8 @@ func runTest(t testutil.T, input string, engine promql.QueryEngine) error {
// against a test storage.
type test struct {
testutil.T
// testingMode distinguishes between normal execution and test-execution mode.
testingMode bool
cmds []testCommand
@ -174,10 +182,11 @@ type test struct {
}
// newTest returns an initialized empty Test.
func newTest(t testutil.T, input string) (*test, error) {
func newTest(t testutil.T, input string, testingMode bool) (*test, error) {
test := &test{
T: t,
cmds: []testCommand{},
T: t,
cmds: []testCommand{},
testingMode: testingMode,
}
err := test.parse(input)
test.clear()
@ -1078,11 +1087,25 @@ func (t *test) exec(tc testCommand, engine promql.QueryEngine) error {
}
func (t *test) execEval(cmd *evalCmd, engine promql.QueryEngine) error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
do := func() error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
}
return t.execInstantEval(cmd, engine)
}
return t.execInstantEval(cmd, engine)
if t.testingMode {
return do()
}
if tt, ok := t.T.(*testing.T); ok {
tt.Run(fmt.Sprintf("line %d/%s", cmd.line, cmd.expr), func(t *testing.T) {
require.NoError(t, do())
})
return nil
}
return errors.New("t.T is not testing.T")
}
func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {

View file

@ -595,7 +595,7 @@ eval range from 0 to 5m step 5m testmetric
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery))
err := testTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery))
if testCase.expectedError == "" {
require.NoError(t, err)

View file

@ -5,7 +5,7 @@ load 5m
http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1
http_requests_histogram{path="/foo"} {{schema:0 sum:1 count:1}}x9
http_requests_histogram{path="/bar"} 0 0 0 0 0 0 0 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:1 count:1}}
http_requests_histogram{path="/biz"} 0 1 0 2 0 3 0 {{schema:0 sum:1 count:1}} {{schema:0 sum:2 count:2}} {{schema:0 sum:1 count:1}}
http_requests_histogram{path="/biz"} 0 1 0 2 0 3 0 {{schema:0 sum:1 count:1 z_bucket_w:0.001 z_bucket:2}} {{schema:0 sum:2 count:2 z_bucket_w:0.001 z_bucket:1}} {{schema:0 sum:1 count:1 z_bucket_w:0.001 z_bucket:2}}
# Tests for resets().
eval instant at 50m resets(http_requests[5m])
@ -42,6 +42,18 @@ eval instant at 50m resets(http_requests[50m])
eval instant at 50m resets(nonexistent_metric[50m])
# Test for mix of floats and histograms.
eval instant at 50m resets(http_requests_histogram[6m])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests_histogram[60m])
{path="/foo"} 0
{path="/bar"} 1
{path="/biz"} 6
# Tests for changes().
eval instant at 50m changes(http_requests[5m])
@ -494,6 +506,7 @@ load 5m
test_sgn{src="sgn-d"} -50
test_sgn{src="sgn-e"} 0
test_sgn{src="sgn-f"} 100
test_sgn{src="sgn-histogram"} {{schema:1 sum:1 count:1}}
eval instant at 0m sgn(test_sgn)
{src="sgn-a"} -1
@ -1259,11 +1272,16 @@ clear
load 5m
exp_root_log{l="x"} 10
exp_root_log{l="y"} 20
exp_root_log_h{l="z"} {{schema:1 sum:1 count:1}}
eval instant at 1m exp(exp_root_log)
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903
eval instant at 1m exp({__name__=~"exp_root_log(_h)?"})
{l="x"} 22026.465794806718
{l="y"} 485165195.4097903
eval instant at 1m exp(exp_root_log - 10)
{l="y"} 22026.465794806718
{l="x"} 1
@ -1276,6 +1294,10 @@ eval instant at 1m ln(exp_root_log)
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991
eval instant at 1m ln({__name__=~"exp_root_log(_h)?"})
{l="x"} 2.302585092994046
{l="y"} 2.995732273553991
eval instant at 1m ln(exp_root_log - 10)
{l="y"} 2.302585092994046
{l="x"} -Inf
@ -1288,14 +1310,26 @@ eval instant at 1m exp(ln(exp_root_log))
{l="y"} 20
{l="x"} 10
eval instant at 1m exp(ln({__name__=~"exp_root_log(_h)?"}))
{l="y"} 20
{l="x"} 10
eval instant at 1m sqrt(exp_root_log)
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958
eval instant at 1m sqrt({__name__=~"exp_root_log(_h)?"})
{l="x"} 3.1622776601683795
{l="y"} 4.47213595499958
eval instant at 1m log2(exp_root_log)
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363
eval instant at 1m log2({__name__=~"exp_root_log(_h)?"})
{l="x"} 3.3219280948873626
{l="y"} 4.321928094887363
eval instant at 1m log2(exp_root_log - 10)
{l="y"} 3.3219280948873626
{l="x"} -Inf
@ -1308,6 +1342,10 @@ eval instant at 1m log10(exp_root_log)
{l="x"} 1
{l="y"} 1.301029995663981
eval instant at 1m log10({__name__=~"exp_root_log(_h)?"})
{l="x"} 1
{l="y"} 1.301029995663981
eval instant at 1m log10(exp_root_log - 10)
{l="y"} 1
{l="x"} -Inf

View file

@ -330,6 +330,8 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB
)
validationScheme := model.UTF8Validation
@ -350,12 +352,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
}
t := sp.activeTargets[fp]
interval, timeout, err := t.intervalAndTimeout(interval, timeout)
targetInterval, targetTimeout, err := t.intervalAndTimeout(interval, timeout)
var (
s = &targetScraper{
Target: t,
client: sp.client,
timeout: timeout,
timeout: targetTimeout,
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
@ -373,10 +375,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
trackTimestampsStaleness: trackTimestampsStaleness,
mrc: mrc,
cache: cache,
interval: interval,
timeout: timeout,
interval: targetInterval,
timeout: targetTimeout,
validationScheme: validationScheme,
fallbackScrapeProtocol: fallbackScrapeProtocol,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
})
)
if err != nil {

View file

@ -65,10 +65,15 @@ func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m)
}
func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
func newTestRegistryAndScrapeMetrics(t testing.TB) (*prometheus.Registry, *scrapeMetrics) {
reg := prometheus.NewRegistry()
metrics, err := newScrapeMetrics(reg)
require.NoError(t, err)
return reg, metrics
}
func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
_, metrics := newTestRegistryAndScrapeMetrics(t)
return metrics
}
@ -370,6 +375,7 @@ func TestScrapePoolReload(t *testing.T) {
return l
}
reg, metrics := newTestRegistryAndScrapeMetrics(t)
sp := &scrapePool{
appendable: &nopAppendable{},
activeTargets: map[uint64]*Target{},
@ -377,7 +383,7 @@ func TestScrapePoolReload(t *testing.T) {
newLoop: newLoop,
logger: nil,
client: http.DefaultClient,
metrics: newTestScrapeMetrics(t),
metrics: metrics,
symbolTable: labels.NewSymbolTable(),
}
@ -432,6 +438,12 @@ func TestScrapePoolReload(t *testing.T) {
require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload")
got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
require.NoError(t, err)
expectedName, expectedValue := "interval", "3s"
require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got)
require.Equal(t, 1.0, prom_testutil.ToFloat64(sp.metrics.targetScrapePoolReloads))
}
func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
@ -447,6 +459,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
}
return l
}
reg, metrics := newTestRegistryAndScrapeMetrics(t)
sp := &scrapePool{
appendable: &nopAppendable{},
activeTargets: map[uint64]*Target{
@ -460,7 +473,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
newLoop: newLoop,
logger: nil,
client: http.DefaultClient,
metrics: newTestScrapeMetrics(t),
metrics: metrics,
symbolTable: labels.NewSymbolTable(),
}
@ -468,6 +481,30 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
if err != nil {
t.Fatalf("unable to reload configuration: %s", err)
}
// Check that the reload metric is labeled with the pool interval, not the overridden interval.
got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
require.NoError(t, err)
expectedName, expectedValue := "interval", "3s"
require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got)
}
// Gather metrics from the provided Gatherer with specified familyName,
// and return all sets of name/value pairs.
func gatherLabels(g prometheus.Gatherer, familyName string) ([][]*dto.LabelPair, error) {
families, err := g.Gather()
if err != nil {
return nil, err
}
ret := make([][]*dto.LabelPair, 0)
for _, f := range families {
if f.GetName() == familyName {
for _, m := range f.GetMetric() {
ret = append(ret, m.GetLabel())
}
break
}
}
return ret, nil
}
func TestScrapePoolTargetLimit(t *testing.T) {

View file

@ -354,6 +354,80 @@ func TestTargetsFromGroup(t *testing.T) {
require.EqualError(t, failures[0], expectedError)
}
// TestTargetsFromGroupWithLabelKeepDrop aims to demonstrate and reinforce the current behavior: relabeling's "labelkeep" and "labeldrop"
// are applied to all labels of a target, including internal ones (labels starting with "__" such as "__address__").
// This will be helpful for cases like https://github.com/prometheus/prometheus/issues/12355.
func TestTargetsFromGroupWithLabelKeepDrop(t *testing.T) {
tests := []struct {
name string
cfgText string
targets []model.LabelSet
shouldDropTarget bool
}{
{
name: "no relabeling",
cfgText: `
global:
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: job1
static_configs:
- targets: ["localhost:9090"]
`,
targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}},
},
{
name: "labelkeep",
cfgText: `
global:
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: job1
static_configs:
- targets: ["localhost:9090"]
relabel_configs:
- regex: 'foo'
action: labelkeep
`,
targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}},
shouldDropTarget: true,
},
{
name: "labeldrop",
cfgText: `
global:
metric_name_validation_scheme: legacy
scrape_configs:
- job_name: job1
static_configs:
- targets: ["localhost:9090"]
relabel_configs:
- regex: '__address__'
action: labeldrop
`,
targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}},
shouldDropTarget: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config := loadConfiguration(t, tt.cfgText)
lb := labels.NewBuilder(labels.EmptyLabels())
targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: tt.targets}, config.ScrapeConfigs[0], nil, lb)
if tt.shouldDropTarget {
require.Len(t, failures, 1)
require.EqualError(t, failures[0], "instance 0 in group : no address")
require.Empty(t, targets)
} else {
require.Empty(t, failures)
require.Len(t, targets, 1)
}
})
}
}
func BenchmarkTargetsFromGroup(b *testing.B) {
// Simulate Kubernetes service-discovery and use subset of rules from typical Prometheus config.
cfgText := `

View file

@ -29,15 +29,15 @@ import (
//
// Labels that start with non-letter rune will be prefixed with "key_".
// An exception is made for double-underscores which are allowed.
func NormalizeLabel(label string, allowUTF8 bool) string {
// Trivial case
if len(label) == 0 || allowUTF8 {
func NormalizeLabel(label string) string {
// Trivial case.
if len(label) == 0 {
return label
}
label = strutil.SanitizeLabelName(label)
// If label starts with a number, prepend with "key_"
// If label starts with a number, prepend with "key_".
if unicode.IsDigit(rune(label[0])) {
label = "key_" + label
} else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {

View file

@ -22,27 +22,24 @@ import (
func TestNormalizeLabel(t *testing.T) {
tests := []struct {
label string
expected string
expectedUTF8 string
label string
expected string
}{
{"", "", ""},
{"label:with:colons", "label_with_colons", "label:with:colons"}, // Without UTF-8 support, colons are only allowed in metric names
{"LabelWithCapitalLetters", "LabelWithCapitalLetters", "LabelWithCapitalLetters"},
{"label!with&special$chars)", "label_with_special_chars_", "label!with&special$chars)"},
{"label_with_foreign_characters_字符", "label_with_foreign_characters___", "label_with_foreign_characters_字符"},
{"label.with.dots", "label_with_dots", "label.with.dots"},
{"123label", "key_123label", "123label"},
{"_label_starting_with_underscore", "key_label_starting_with_underscore", "_label_starting_with_underscore"},
{"__label_starting_with_2underscores", "__label_starting_with_2underscores", "__label_starting_with_2underscores"},
{"", ""},
{"label:with:colons", "label_with_colons"},
{"LabelWithCapitalLetters", "LabelWithCapitalLetters"},
{"label!with&special$chars)", "label_with_special_chars_"},
{"label_with_foreign_characters_字符", "label_with_foreign_characters___"},
{"label.with.dots", "label_with_dots"},
{"123label", "key_123label"},
{"_label_starting_with_underscore", "key_label_starting_with_underscore"},
{"__label_starting_with_2underscores", "__label_starting_with_2underscores"},
}
for i, test := range tests {
t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
result := NormalizeLabel(test.label, false)
result := NormalizeLabel(test.label)
require.Equal(t, test.expected, result)
uTF8result := NormalizeLabel(test.label, true)
require.Equal(t, test.expectedUTF8, uTF8result)
})
}
}

View file

@ -157,7 +157,10 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
// map ensures no duplicate label names.
l := make(map[string]string, maxLabelCount)
for _, label := range labels {
var finalKey = prometheustranslator.NormalizeLabel(label.Name, settings.AllowUTF8)
finalKey := label.Name
if !settings.AllowUTF8 {
finalKey = prometheustranslator.NormalizeLabel(finalKey)
}
if existingValue, alreadyExists := l[finalKey]; alreadyExists {
l[finalKey] = existingValue + ";" + label.Value
} else {
@ -166,7 +169,10 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
}
for _, lbl := range promotedAttrs {
normalized := prometheustranslator.NormalizeLabel(lbl.Name, settings.AllowUTF8)
normalized := lbl.Name
if !settings.AllowUTF8 {
normalized = prometheustranslator.NormalizeLabel(normalized)
}
if _, exists := l[normalized]; !exists {
l[normalized] = lbl.Value
}
@ -204,8 +210,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
name = prometheustranslator.NormalizeLabel(name, settings.AllowUTF8)
if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
name = prometheustranslator.NormalizeLabel(name)
}
l[name] = extras[i+1]
}
@ -600,6 +606,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
}
settings.PromoteResourceAttributes = nil
if settings.KeepIdentifyingResourceAttributes {
// Do not pass identifying attributes as ignoreAttrs below.
identifyingAttrs = nil
}
labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name)
haveIdentifier := false
for _, l := range labels {

View file

@ -49,15 +49,44 @@ func TestCreateAttributes(t *testing.T) {
}
attrs := pcommon.NewMap()
attrs.PutStr("metric-attr", "metric value")
attrs.PutStr("metric-attr-other", "metric value other")
testCases := []struct {
name string
promoteResourceAttributes []string
ignoreAttrs []string
expectedLabels []prompb.Label
}{
{
name: "Successful conversion without resource attribute promotion",
promoteResourceAttributes: nil,
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "metric_attr",
Value: "metric value",
},
{
Name: "metric_attr_other",
Value: "metric value other",
},
},
},
{
name: "Successful conversion with some attributes ignored",
promoteResourceAttributes: nil,
ignoreAttrs: []string{"metric-attr-other"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
@ -97,6 +126,10 @@ func TestCreateAttributes(t *testing.T) {
Name: "metric_attr",
Value: "metric value",
},
{
Name: "metric_attr_other",
Value: "metric value other",
},
{
Name: "existent_attr",
Value: "resource value",
@ -127,6 +160,10 @@ func TestCreateAttributes(t *testing.T) {
Name: "metric_attr",
Value: "metric value",
},
{
Name: "metric_attr_other",
Value: "metric value other",
},
},
},
{
@ -153,6 +190,10 @@ func TestCreateAttributes(t *testing.T) {
Name: "metric_attr",
Value: "metric value",
},
{
Name: "metric_attr_other",
Value: "metric value other",
},
},
},
}
@ -161,7 +202,7 @@ func TestCreateAttributes(t *testing.T) {
settings := Settings{
PromoteResourceAttributes: tc.promoteResourceAttributes,
}
lbls := createAttributes(resource, attrs, settings, nil, false, model.MetricNameLabel, "test_metric")
lbls := createAttributes(resource, attrs, settings, tc.ignoreAttrs, false, model.MetricNameLabel, "test_metric")
assert.ElementsMatch(t, lbls, tc.expectedLabels)
})

View file

@ -32,13 +32,14 @@ import (
)
type Settings struct {
Namespace string
ExternalLabels map[string]string
DisableTargetInfo bool
ExportCreatedMetric bool
AddMetricSuffixes bool
AllowUTF8 bool
PromoteResourceAttributes []string
Namespace string
ExternalLabels map[string]string
DisableTargetInfo bool
ExportCreatedMetric bool
AddMetricSuffixes bool
AllowUTF8 bool
PromoteResourceAttributes []string
KeepIdentifyingResourceAttributes bool
}
// PrometheusConverter converts from OTel write format to Prometheus remote write format.

View file

@ -28,41 +28,72 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
func TestFromMetrics(t *testing.T) {
t.Run("successful", func(t *testing.T) {
converter := NewPrometheusConverter()
payload := createExportRequest(5, 128, 128, 2, 0)
var expMetadata []prompb.MetricMetadata
resourceMetricsSlice := payload.Metrics().ResourceMetrics()
for i := 0; i < resourceMetricsSlice.Len(); i++ {
scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics()
for j := 0; j < scopeMetricsSlice.Len(); j++ {
metricSlice := scopeMetricsSlice.At(j).Metrics()
for k := 0; k < metricSlice.Len(); k++ {
metric := metricSlice.At(k)
promName := prometheustranslator.BuildCompliantName(metric, "", false, false)
expMetadata = append(expMetadata, prompb.MetricMetadata{
Type: otelMetricTypeToPromMetricType(metric),
MetricFamilyName: promName,
Help: metric.Description(),
Unit: metric.Unit(),
})
for _, keepIdentifyingResourceAttributes := range []bool{false, true} {
t.Run(fmt.Sprintf("successful/keepIdentifyingAttributes=%v", keepIdentifyingResourceAttributes), func(t *testing.T) {
converter := NewPrometheusConverter()
payload := createExportRequest(5, 128, 128, 2, 0)
var expMetadata []prompb.MetricMetadata
resourceMetricsSlice := payload.Metrics().ResourceMetrics()
for i := 0; i < resourceMetricsSlice.Len(); i++ {
scopeMetricsSlice := resourceMetricsSlice.At(i).ScopeMetrics()
for j := 0; j < scopeMetricsSlice.Len(); j++ {
metricSlice := scopeMetricsSlice.At(j).Metrics()
for k := 0; k < metricSlice.Len(); k++ {
metric := metricSlice.At(k)
promName := prometheustranslator.BuildCompliantName(metric, "", false, false)
expMetadata = append(expMetadata, prompb.MetricMetadata{
Type: otelMetricTypeToPromMetricType(metric),
MetricFamilyName: promName,
Help: metric.Description(),
Unit: metric.Unit(),
})
}
}
}
}
annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), Settings{})
require.NoError(t, err)
require.Empty(t, annots)
annots, err := converter.FromMetrics(
context.Background(),
payload.Metrics(),
Settings{KeepIdentifyingResourceAttributes: keepIdentifyingResourceAttributes},
)
require.NoError(t, err)
require.Empty(t, annots)
if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
})
if diff := cmp.Diff(expMetadata, converter.Metadata()); diff != "" {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
ts := converter.TimeSeries()
require.Len(t, ts, 1408+1) // +1 for the target_info.
target_info_count := 0
for _, s := range ts {
b := labels.NewScratchBuilder(2)
lbls := s.ToLabels(&b, nil)
if lbls.Get(labels.MetricName) == "target_info" {
target_info_count++
require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
require.Equal(t, "id1234", lbls.Get("instance"))
if keepIdentifyingResourceAttributes {
require.Equal(t, "test-service", lbls.Get("service_name"))
require.Equal(t, "test-namespace", lbls.Get("service_namespace"))
require.Equal(t, "id1234", lbls.Get("service_instance_id"))
} else {
require.False(t, lbls.Has("service_name"))
require.False(t, lbls.Has("service_namespace"))
require.False(t, lbls.Has("service_instance_id"))
}
}
}
require.Equal(t, 1, target_info_count)
})
}
t.Run("context cancellation", func(t *testing.T) {
converter := NewPrometheusConverter()
@ -169,6 +200,15 @@ func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCou
rm := request.Metrics().ResourceMetrics().AppendEmpty()
generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount)
// Fake some resource attributes.
for k, v := range map[string]string{
"service.name": "test-service",
"service.namespace": "test-namespace",
"service.instance.id": "id1234",
} {
rm.Resource().Attributes().PutStr(k, v)
}
metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
ts := pcommon.NewTimestampFromTime(time.Now())

View file

@ -1688,7 +1688,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
s.enqueuedHistograms.Sub(int64(histogramCount))
}
// sendSamples to the remote storage with backoff for recoverable errors.
// sendSamplesWithBackoff to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) {
// Build the WriteRequest with no metadata.
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
@ -1802,7 +1802,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
return accumulatedStats, err
}
// sendV2Samples to the remote storage with backoff for recoverable errors.
// sendV2SamplesWithBackoff to the remote storage with backoff for recoverable errors.
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) {
// Build the WriteRequest with no metadata.
req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)

View file

@ -512,9 +512,10 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
converter := otlptranslator.NewPrometheusConverter()
annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{
AddMetricSuffixes: true,
AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes,
PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
AddMetricSuffixes: true,
AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes,
PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
})
if err != nil {
h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)

View file

@ -25,20 +25,20 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes).
└──────────────────────────────┘
```
# Chunk
```
┌───────────────┬───────────────────┬─────────────┬────────────────┐
│ len <uvarint> │ encoding <1 byte> │ data <data>CRC32 <4 byte>
└───────────────┴───────────────────┴─────────────┴────────────────┘
┌───────────────┬───────────────────┬─────────────┬───────────────────┐
│ len <uvarint> │ encoding <1 byte> │ data <data>checksum <4 byte>
└───────────────┴───────────────────┴─────────────┴───────────────────┘
```
Notes:
* `<uvarint>` has 1 to 10 bytes.
* `encoding`: Currently either `XOR`, `histogram`, or `floathistogram`, see
[code for numerical values](https://github.com/prometheus/prometheus/blob/02d0de9987ad99dee5de21853715954fadb3239f/tsdb/chunkenc/chunk.go#L28-L47).
* `len`: Chunk size in bytes. 1 to 10 bytes long using the [`<uvarint>` encoding](https://go.dev/src/encoding/binary/varint.go).
* `encoding`: Currently either `XOR`, `histogram`, or `floathistogram`, see [code for numerical values](https://github.com/prometheus/prometheus/blob/02d0de9987ad99dee5de21853715954fadb3239f/tsdb/chunkenc/chunk.go#L28-L47).
* `data`: See below for each encoding.
* `checksum`: Checksum of `encoding` and `data`. It's a [cyclic redundancy check](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) with the Castagnoli polynomial, serialized as an unsigned 32-bit big-endian number, commonly referred to as `CRC-32C` (see the parsing sketch below).
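
For illustration, a minimal Go parsing sketch for the layout above follows. It is not the Prometheus implementation: the package and function names are invented, and `len` is taken here to be the size of `data`. It reads the `<uvarint>` length, the encoding byte and the data, then verifies the trailing big-endian CRC-32C computed over `encoding` and `data`.

```
package chunksketch

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// parseChunk decodes one chunk laid out as: len <uvarint>, encoding <1 byte>,
// data <len bytes>, checksum <4 bytes, big-endian CRC-32C of encoding+data>.
func parseChunk(b []byte) (encoding byte, data []byte, err error) {
	dataLen, n := binary.Uvarint(b) // 1 to 10 bytes on disk
	if n <= 0 {
		return 0, nil, errors.New("invalid chunk length")
	}
	l := int(dataLen)
	b = b[n:]
	if len(b) < 1+l+4 { // encoding byte + data + checksum
		return 0, nil, errors.New("chunk too short")
	}
	encoding, data = b[0], b[1:1+l]
	want := binary.BigEndian.Uint32(b[1+l : 1+l+4])
	if got := crc32.Checksum(b[:1+l], castagnoli); got != want {
		return 0, nil, fmt.Errorf("chunk checksum mismatch: got %08x, want %08x", got, want)
	}
	return encoding, data, nil
}
```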
## XOR chunk data
@ -177,7 +177,6 @@ the encoding will therefore result in a short varbit representation. The upper
bound of 33554430 is picked so that the varbit encoded value will take at most
4 bytes.
## Float histogram chunk data
Float histograms have the same layout as histograms apart from the encoding of samples.

View file

@ -1,15 +1,32 @@
# WAL Disk Format
This document describes the official Prometheus WAL format.
The write ahead log operates in segments that are numbered and sequential,
e.g. `000000`, `000001`, `000002`, etc., and are limited to 128MB by default.
A segment is written to in pages of 32KB. Only the last page of the most recent segment
and are limited to 128MB by default.
## Segment filename
The sequence number is captured in the segment filename,
e.g. `000000`, `000001`, `000002`, etc. The first unsigned integer represents
the sequence number of the segment, typically encoded with six digits.
## Segment encoding
This section describes the segment encoding.
A segment encodes an array of records. It does not contain any header. A segment
is written to pages of 32KB. Only the last page of the most recent segment
may be partial. A WAL record is an opaque byte slice that gets split up into sub-records
should it exceed the remaining space of the current page. Records are never split across
segment boundaries. If a single record exceeds the default segment size, a segment with
a larger size will be created.
The encoding of pages is largely borrowed from [LevelDB's/RocksDB's write ahead log.](https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log-File-Format)
Notable deviations are that the record fragment is encoded as:
### Records encoding
Each record fragment is encoded as:
```
┌───────────┬──────────┬────────────┬──────────────┐
@ -17,7 +34,8 @@ Notable deviations are that the record fragment is encoded as:
└───────────┴──────────┴────────────┴──────────────┘
```
The initial type byte is made up of three components: a 3-bit reserved field, a 1-bit zstd compression flag, a 1-bit snappy compression flag, and a 3-bit type flag.
The initial type byte is made up of three components: a 3-bit reserved field,
a 1-bit zstd compression flag, a 1-bit snappy compression flag, and a 3-bit type flag.
```
┌─────────────────┬──────────────────┬────────────────────┬──────────────────┐
@ -25,7 +43,7 @@ The initial type byte is made up of three components: a 3-bit reserved field, a
└─────────────────┴──────────────────┴────────────────────┴──────────────────┘
```
The lowest 3 bits within this flag represent the record type as follows:
The lowest 3 bits within the type flag represent the record type as follows:
* `0`: rest of page will be empty
* `1`: a full record encoded in a single fragment
@ -33,11 +51,16 @@ The lowest 3 bits within this flag represent the record type as follows:
* `3`: middle fragment of a record
* `4`: final fragment of a record
## Record encoding
After the type byte, a 2-byte length and then a 4-byte checksum of the following data are encoded (see the decoding sketch below).
The records written to the write ahead log are encoded as follows:
All float values are represented using the [IEEE 754 format](https://en.wikipedia.org/wiki/IEEE_754).
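
The following sketch ties the framing above together by decoding a single fragment from a page buffer: the type byte with its compression and type bits, the 2-byte length, and the 4-byte checksum. It is only an illustration, not the Prometheus WAL reader; the big-endian byte order of the length and checksum fields is an assumption here, and all names are invented.

```
package walsketch

import (
	"encoding/binary"
	"errors"
)

// fragment is one record fragment as framed on a page.
type fragment struct {
	recType byte   // lowest 3 bits of the type byte: 0 (rest of page empty) .. 4 (final fragment)
	snappy  bool   // 1-bit snappy compression flag
	zstd    bool   // 1-bit zstd compression flag
	crc     uint32 // 4-byte checksum of the fragment data
	data    []byte // fragment payload of `length` bytes
}

// readFragment decodes one fragment and returns the remainder of the page.
func readFragment(page []byte) (fragment, []byte, error) {
	if len(page) < 7 { // type byte + 2-byte length + 4-byte checksum
		return fragment{}, nil, errors.New("short fragment header")
	}
	typ := page[0]
	length := int(binary.BigEndian.Uint16(page[1:3])) // byte order assumed
	crc := binary.BigEndian.Uint32(page[3:7])         // byte order assumed
	if len(page) < 7+length {
		return fragment{}, nil, errors.New("short fragment payload")
	}
	f := fragment{
		recType: typ & 0b111,     // 3-bit type flag (values 0..4 above)
		snappy:  typ&(1<<3) != 0, // snappy flag, per the bit diagram
		zstd:    typ&(1<<4) != 0, // zstd flag, per the bit diagram
		crc:     crc,
		data:    page[7 : 7+length],
	}
	return f, page[7+length:], nil
}
```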
### Series records
### Record types
In the following sections, all the known record types are described. New types
can be added in the future.
#### Series records
Series records encode the labels that identify a series and its unique ID.
@ -58,7 +81,7 @@ Series records encode the labels that identifies a series and its unique ID.
└────────────────────────────────────────────┘
```
### Sample records
#### Sample records
Sample records encode samples as a list of triples `(series_id, timestamp, value)`.
Series reference and timestamp are encoded as deltas w.r.t the first sample.
@ -79,7 +102,7 @@ The first sample record begins at the second row.
└──────────────────────────────────────────────────────────────────┘
```
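
To make the delta scheme concrete, here is a hedged encoding sketch: the first sample's series reference and timestamp are written in full, and every sample is then stored as signed varint deltas against that first sample, followed by the raw float value. Field widths and byte order are assumptions for illustration rather than the exact Prometheus record encoding.

```
package walsketch

import (
	"encoding/binary"
	"math"
)

type sample struct {
	ref uint64  // series ID
	t   int64   // timestamp
	v   float64 // value
}

// encodeSamples writes (series_id, timestamp, value) triples with the first
// sample's ref and timestamp in full, then per-sample deltas as varints.
func encodeSamples(samples []sample) []byte {
	if len(samples) == 0 {
		return nil
	}
	first := samples[0]
	buf := make([]byte, 0, 16+len(samples)*18)
	buf = binary.BigEndian.AppendUint64(buf, first.ref)
	buf = binary.BigEndian.AppendUint64(buf, uint64(first.t))
	for _, s := range samples {
		buf = binary.AppendVarint(buf, int64(s.ref)-int64(first.ref)) // id_delta
		buf = binary.AppendVarint(buf, s.t-first.t)                   // timestamp_delta
		buf = binary.BigEndian.AppendUint64(buf, math.Float64bits(s.v))
	}
	return buf
}
```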
### Tombstone records
#### Tombstone records
Tombstone records encode tombstones as a list of triples `(series_id, min_time, max_time)`
and specify an interval for which samples of a series got deleted.
@ -95,9 +118,9 @@ and specify an interval for which samples of a series got deleted.
└─────────────────────────────────────────────────────┘
```
### Exemplar records
#### Exemplar records
Exemplar records encode exemplars as a list of triples `(series_id, timestamp, value)`
plus the length of the labels list, and all the labels.
The first row stores the starting id and the starting timestamp.
Series reference and timestamp are encoded as deltas w.r.t the first exemplar.
@ -127,7 +150,7 @@ See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/Op
└──────────────────────────────────────────────────────────────────┘
```
### Metadata records
#### Metadata records
Metadata records encode the metadata updates associated with a series.
@ -156,3 +179,90 @@ Metadata records encode the metadata updates associated with a series.
└────────────────────────────────────────────┘
```
#### Histogram records
Histogram records encode the integer and float native histogram samples.
A record with integer native histograms using exponential bucketing:
```
┌───────────────────────────────────────────────────────────────────────┐
│ type = 7 <1b>
├───────────────────────────────────────────────────────────────────────┤
│ ┌────────────────────┬───────────────────────────┐ │
│ │ id <8b> │ timestamp <8b> │ │
│ └────────────────────┴───────────────────────────┘ │
│ ┌────────────────────┬──────────────────────────────────────────────┐ │
│ │ id_delta <uvarint> │ timestamp_delta <uvarint> │ │
│ ├────────────────────┴────┬─────────────────────────────────────────┤ │
│ │ counter_reset_hint <1b> │ schema <varint> │ │
│ ├─────────────────────────┴────┬────────────────────────────────────┤ │
│ │ zero_threshold (float) <8b> │ zero_count <uvarint> │ │
│ ├─────────────────┬────────────┴────────────────────────────────────┤ │
│ │ count <uvarint> │ sum (float) <8b> │ │
│ ├─────────────────┴─────────────────────────────────────────────────┤ │
│ │ positive_spans_num <uvarint> │ │
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
│ │ . . . │ │
│ ├───────────────────────────────────────────────────────────────────┤ │
│ │ negative_spans_num <uvarint> │ │
│ ├───────────────────────────────┬───────────────────────────────────┤ │
│ │ negative_span_offset <varint> │ negative_span_len <uvarint32> │ │
│ ├───────────────────────────────┴───────────────────────────────────┤ │
│ │ . . . │ │
│ ├───────────────────────────────────────────────────────────────────┤ │
│ │ positive_bkts_num <uvarint> │ │
│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │
│ │ positive_bkt_1 <varint> │ . . . │ positive_bkt_n <varint> │ │
│ ├─────────────────────────┴───────┴─────────────────────────────────┤ │
│ │ negative_bkts_num <uvarint> │ │
│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │
│ │ negative_bkt_1 <varint> │ . . . │ negative_bkt_n <varint> │ │
│ └─────────────────────────┴───────┴─────────────────────────────────┘ │
│ . . . │
└───────────────────────────────────────────────────────────────────────┘
```
A record with float histograms:
```
┌───────────────────────────────────────────────────────────────────────┐
│ type = 8 <1b>
├───────────────────────────────────────────────────────────────────────┤
│ ┌────────────────────┬───────────────────────────┐ │
│ │ id <8b> │ timestamp <8b> │ │
│ └────────────────────┴───────────────────────────┘ │
│ ┌────────────────────┬──────────────────────────────────────────────┐ │
│ │ id_delta <uvarint> │ timestamp_delta <uvarint> │ │
│ ├────────────────────┴────┬─────────────────────────────────────────┤ │
│ │ counter_reset_hint <1b> │ schema <varint> │ │
│ ├─────────────────────────┴────┬────────────────────────────────────┤ │
│ │ zero_threshold (float) <8b> │ zero_count (float) <8b> │ │
│ ├────────────────────┬─────────┴────────────────────────────────────┤ │
│ │ count (float) <8b> │ sum (float) <8b> │ │
│ ├────────────────────┴──────────────────────────────────────────────┤ │
│ │ positive_spans_num <uvarint> │ │
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
│ │ . . . │ │
│ ├───────────────────────────────────────────────────────────────────┤ │
│ │ negative_spans_num <uvarint> │ │
│ ├───────────────────────────────┬───────────────────────────────────┤ │
│ │ negative_span_offset <varint> │ negative_span_len <uvarint32> │ │
│ ├───────────────────────────────┴───────────────────────────────────┤ │
│ │ . . . │ │
│ ├───────────────────────────────────────────────────────────────────┤ │
│ │ positive_bkts_num <uvarint> │ │
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
│ │ positive_bkt_1 (float) <8b> │ . . . │ positive_bkt_n (float) <8b> │ │
│ ├─────────────────────────────┴───────┴─────────────────────────────┤ │
│ │ negative_bkts_num <uvarint> │ │
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
│ │ negative_bkt_1 (float) <8b> │ . . . │ negative_bkt_n (float) <8b> │ │
│ └─────────────────────────────┴───────┴─────────────────────────────┘ │
│ . . . │
└───────────────────────────────────────────────────────────────────────┘
```

View file

@ -18,6 +18,7 @@ import (
"context"
"encoding/binary"
"fmt"
"maps"
"math"
"runtime"
"slices"
@ -32,6 +33,8 @@ import (
"github.com/prometheus/prometheus/storage"
)
const exponentialSliceGrowthFactor = 2
var allPostingsKey = labels.Label{}
// AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
@ -55,15 +58,33 @@ var ensureOrderBatchPool = sync.Pool{
// EnsureOrder() must be called once before any reads are done. This allows for quick
// unordered batch fills on startup.
type MemPostings struct {
mtx sync.RWMutex
m map[string]map[string][]storage.SeriesRef
mtx sync.RWMutex
// m holds the postings lists for each label-value pair, indexed first by label name, and then by label value.
//
// mtx must be held when interacting with m (the appropriate one for reading or writing).
// It is safe to retain a reference to a postings list after releasing the lock.
//
// BUG: There's currently a data race in addFor, which might modify the tail of the postings list:
// https://github.com/prometheus/prometheus/issues/15317
m map[string]map[string][]storage.SeriesRef
// lvs holds the label values for each label name.
// lvs[name] is essentially an unsorted append-only list of all keys in m[name].
// mtx must be held when interacting with lvs.
// Since it's append-only, it is safe to read the label values slice after releasing the lock.
lvs map[string][]string
ordered bool
}
const defaultLabelNamesMapSize = 512
// NewMemPostings returns a memPostings that's ready for reads and writes.
func NewMemPostings() *MemPostings {
return &MemPostings{
m: make(map[string]map[string][]storage.SeriesRef, 512),
m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize),
lvs: make(map[string][]string, defaultLabelNamesMapSize),
ordered: true,
}
}
@ -72,7 +93,8 @@ func NewMemPostings() *MemPostings {
// until EnsureOrder() was called once.
func NewUnorderedMemPostings() *MemPostings {
return &MemPostings{
m: make(map[string]map[string][]storage.SeriesRef, 512),
m: make(map[string]map[string][]storage.SeriesRef, defaultLabelNamesMapSize),
lvs: make(map[string][]string, defaultLabelNamesMapSize),
ordered: false,
}
}
@ -80,16 +102,19 @@ func NewUnorderedMemPostings() *MemPostings {
// Symbols returns an iterator over all unique name and value strings, in order.
func (p *MemPostings) Symbols() StringIter {
p.mtx.RLock()
// Make a quick clone of the map to avoid holding the lock while iterating.
// It's safe to use the values of the map after releasing the lock, as they're append-only slices.
lvs := maps.Clone(p.lvs)
p.mtx.RUnlock()
// Add all the strings to a map to de-duplicate.
symbols := make(map[string]struct{}, 512)
for n, e := range p.m {
symbols := make(map[string]struct{}, defaultLabelNamesMapSize)
for n, labelValues := range lvs {
symbols[n] = struct{}{}
for v := range e {
for _, v := range labelValues {
symbols[v] = struct{}{}
}
}
p.mtx.RUnlock()
res := make([]string, 0, len(symbols))
for k := range symbols {
@ -145,13 +170,14 @@ func (p *MemPostings) LabelNames() []string {
// LabelValues returns label values for the given name.
func (p *MemPostings) LabelValues(_ context.Context, name string) []string {
p.mtx.RLock()
defer p.mtx.RUnlock()
values := p.lvs[name]
p.mtx.RUnlock()
values := make([]string, 0, len(p.m[name]))
for v := range p.m[name] {
values = append(values, v)
}
return values
// The slice from p.lvs[name] is shared between all readers, and it is append-only.
// Since it's shared, we need to make a copy of it before returning it to make
// sure that no caller modifies the original one by sorting it or filtering it.
// Since it's append-only, we can do this while not holding the mutex anymore.
return slices.Clone(values)
}
// PostingsStats contains cardinality based statistics for postings.
@ -294,6 +320,7 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma
p.mtx.Lock()
defer p.mtx.Unlock()
affectedLabelNames := map[string]struct{}{}
process := func(l labels.Label) {
orig := p.m[l.Name][l.Value]
repl := make([]storage.SeriesRef, 0, len(orig))
@ -306,10 +333,7 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma
p.m[l.Name][l.Value] = repl
} else {
delete(p.m[l.Name], l.Value)
// Delete the key if we removed all values.
if len(p.m[l.Name]) == 0 {
delete(p.m, l.Name)
}
affectedLabelNames[l.Name] = struct{}{}
}
}
@ -323,22 +347,52 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma
// Note that a read query will most likely want to read multiple postings lists, say 5, 10 or 20 (depending on the number of matchers)
// And that read query will most likely evaluate only one of those matchers before we unpause here, so we want to pause often.
if i%512 == 0 {
p.mtx.Unlock()
// While it's tempting to just do a `time.Sleep(time.Millisecond)` here,
// it wouldn't ensure that readers actually were able to get the read lock,
// because if there are writes waiting on same mutex, readers won't be able to get it.
// So we just grab one RLock ourselves.
p.mtx.RLock()
// We shouldn't wait here, because we would be blocking a potential write for no reason.
// Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock.
p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section.
// Now we can wait a little bit just to increase the chance of a reader getting the lock.
// If we were deleting 100M series here, pausing every 512 with 1ms sleeps would be an extra of 200s, which is negligible.
time.Sleep(time.Millisecond)
p.mtx.Lock()
p.unlockWaitAndLockAgain()
}
}
process(allPostingsKey)
// Now we need to update the label values slices.
i = 0
for name := range affectedLabelNames {
i++
// From time to time we want some readers to go through and read their postings.
if i%512 == 0 {
p.unlockWaitAndLockAgain()
}
if len(p.m[name]) == 0 {
// Delete the label name key if we deleted all values.
delete(p.m, name)
delete(p.lvs, name)
continue
}
// Create the new slice with enough room to grow without reallocating.
// We have deleted values here, so there's definitely some churn, so be prepared for it.
lvs := make([]string, 0, exponentialSliceGrowthFactor*len(p.m[name]))
for v := range p.m[name] {
lvs = append(lvs, v)
}
p.lvs[name] = lvs
}
}
// unlockWaitAndLockAgain will unlock an already locked p.mtx.Lock() and then wait a little bit before locking it again,
// letting the RLock()-waiting goroutines get the lock.
func (p *MemPostings) unlockWaitAndLockAgain() {
p.mtx.Unlock()
// While it's tempting to just do a `time.Sleep(time.Millisecond)` here,
// it wouldn't ensure that readers actually were able to get the read lock,
// because if there are writes waiting on same mutex, readers won't be able to get it.
// So we just grab one RLock ourselves.
p.mtx.RLock()
// We shouldn't wait here, because we would be blocking a potential write for no reason.
// Note that if there's a writer waiting for us to unlock, no reader will be able to get the read lock.
p.mtx.RUnlock() //nolint:staticcheck // SA2001: this is an intentionally empty critical section.
// Now we can wait a little bit just to increase the chance of a reader getting the lock.
time.Sleep(time.Millisecond)
p.mtx.Lock()
}
// Iter calls f for each postings list. It aborts if f returns an error and returns it.
@ -370,7 +424,7 @@ func (p *MemPostings) Add(id storage.SeriesRef, lset labels.Labels) {
func appendWithExponentialGrowth[T any](a []T, v T) []T {
if cap(a) < len(a)+1 {
newList := make([]T, len(a), len(a)*2+1)
newList := make([]T, len(a), len(a)*exponentialSliceGrowthFactor+1)
copy(newList, a)
a = newList
}
@ -383,7 +437,11 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) {
nm = map[string][]storage.SeriesRef{}
p.m[l.Name] = nm
}
list := appendWithExponentialGrowth(nm[l.Value], id)
vm, ok := nm[l.Value]
if !ok {
p.lvs[l.Name] = appendWithExponentialGrowth(p.lvs[l.Name], l.Value)
}
list := appendWithExponentialGrowth(vm, id)
nm[l.Value] = list
if !p.ordered {
@ -402,25 +460,27 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) {
}
func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings {
// We'll copy the values into a slice and then match over that,
// We'll take the label values slice and then match over that,
// this way we don't need to hold the mutex while we're matching,
// which can be slow (seconds) if the match function is a huge regex.
// Holding this lock prevents new series from being added (slows down the write path)
// and blocks the compaction process.
vals := p.labelValues(name)
for i, count := 0, 1; i < len(vals); count++ {
if count%checkContextEveryNIterations == 0 && ctx.Err() != nil {
//
// We just need to make sure we don't modify the slice we took,
// so we'll append matching values to a different one.
p.mtx.RLock()
readOnlyLabelValues := p.lvs[name]
p.mtx.RUnlock()
vals := make([]string, 0, len(readOnlyLabelValues))
for i, v := range readOnlyLabelValues {
if i%checkContextEveryNIterations == 0 && ctx.Err() != nil {
return ErrPostings(ctx.Err())
}
if match(vals[i]) {
i++
continue
if match(v) {
vals = append(vals, v)
}
// Didn't match, bring the last value to this position, make the slice shorter and check again.
// The order of the slice doesn't matter as it comes from a map iteration.
vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1]
}
// If none matched (or this label had no values), no need to grab the lock again.
@ -469,27 +529,6 @@ func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string
return Merge(ctx, its...)
}
// labelValues returns a slice of label values for the given label name.
// It will take the read lock.
func (p *MemPostings) labelValues(name string) []string {
p.mtx.RLock()
defer p.mtx.RUnlock()
e := p.m[name]
if len(e) == 0 {
return nil
}
vals := make([]string, 0, len(e))
for v, srs := range e {
if len(srs) > 0 {
vals = append(vals, v)
}
}
return vals
}
// ExpandPostings returns the postings expanded as a slice.
func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) {
for p.Next() {