From 42509bc106da4873ddb5120efc6d1193425a3f39 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 20 Dec 2022 09:40:31 +0100 Subject: [PATCH 1/9] Switch to promci Reuse common actions Signed-off-by: Julien Pivotto --- .github/actions/build/action.yml | 23 -------- .github/actions/check_proto/action.yml | 20 ------- .github/actions/publish_images/action.yml | 47 ---------------- .github/actions/publish_main/action.yml | 43 --------------- .github/actions/publish_release/action.yml | 54 ------------------- .../actions/publish_release_images/action.yml | 53 ------------------ .github/actions/restore_artifacts/action.yml | 19 ------- .github/actions/save_artifacts/action.yml | 17 ------ .github/actions/setup_environment/action.yml | 43 --------------- .github/workflows/ci.yml | 23 +++++--- 10 files changed, 15 insertions(+), 327 deletions(-) delete mode 100644 .github/actions/build/action.yml delete mode 100644 .github/actions/check_proto/action.yml delete mode 100644 .github/actions/publish_images/action.yml delete mode 100644 .github/actions/publish_main/action.yml delete mode 100644 .github/actions/publish_release/action.yml delete mode 100644 .github/actions/publish_release_images/action.yml delete mode 100644 .github/actions/restore_artifacts/action.yml delete mode 100644 .github/actions/save_artifacts/action.yml delete mode 100644 .github/actions/setup_environment/action.yml diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml deleted file mode 100644 index 7b8e3605f..000000000 --- a/.github/actions/build/action.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Build -inputs: - thread: - type: integer - description: Current thread - required: true - default: 3 - parallelism: - type: integer - description: Number of builds to do in parallel - default: 3 - promu_opts: - type: string - description: Options to pass to promu -runs: - using: composite - steps: - - uses: ./.github/actions/setup_environment - - run: ~/go/bin/promu crossbuild -v --parallelism ${{ inputs.parallelism }} --parallelism-thread ${{ inputs.thread }} ${{ inputs.promu_opts }} - shell: bash - - uses: ./.github/actions/save_artifacts - with: - directory: .build diff --git a/.github/actions/check_proto/action.yml b/.github/actions/check_proto/action.yml deleted file mode 100644 index 9d9d773e6..000000000 --- a/.github/actions/check_proto/action.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Check proto files -inputs: - version: - type: string - description: Protoc version - default: "3.5.1" -runs: - using: composite - steps: - - run: | - env - set -x - curl -s -L https://github.com/protocolbuffers/protobuf/releases/download/v${{ inputs.version }}/protoc-${{ inputs.version }}-linux-x86_64.zip > /tmp/protoc.zip - unzip -d /tmp /tmp/protoc.zip - chmod +x /tmp/bin/protoc - export PATH=/tmp/bin:$PATH - make proto - shell: bash - - run: git diff --exit-code - shell: bash diff --git a/.github/actions/publish_images/action.yml b/.github/actions/publish_images/action.yml deleted file mode 100644 index bc17eb7d9..000000000 --- a/.github/actions/publish_images/action.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Publish image -inputs: - registry: - type: string - description: Docker registry - organization: - type: string - description: Organization - login: - type: string - description: Username - password: - type: string - description: Password - dockerfile_path: - description: Path to Dockerfile - type: string - default: "" - dockerbuild_context: - description: Path to Dockerbuild context - type: string - default: "" 
- container_image_name: - description: Name of the container image - type: string - default: "" -runs: - using: composite - steps: - - if: inputs.dockerfile_path != '' - run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.container_image_name != '' - run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.dockerbuild_context != '' - run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile - shell: bash - - run: | - touch /tmp/tmp-profile - . /tmp/tmp-profile - make docker DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - docker images - echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }} - make docker-publish DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - shell: bash diff --git a/.github/actions/publish_main/action.yml b/.github/actions/publish_main/action.yml deleted file mode 100644 index 3ff45c20a..000000000 --- a/.github/actions/publish_main/action.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Publish image -inputs: - docker_hub_organization: - type: string - description: DockerHub organization - default: prom - docker_hub_login: - type: string - description: DockerHub username - docker_hub_password: - type: string - description: DockerHub password - quay_io_organization: - type: string - description: Quay.io organization - default: prometheus - quay_io_login: - type: string - description: Quay.io username - quay_io_password: - type: string - description: Quay.io password -runs: - using: composite - steps: - - uses: ./.github/actions/setup_environment - with: - enable_docker_multibuild: true - - uses: ./.github/actions/restore_artifacts - - uses: ./.github/actions/publish_images - if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != '' - with: - registry: docker.io - organization: ${{ inputs.docker_hub_organization }} - login: ${{ inputs.docker_hub_login }} - password: ${{ inputs.docker_hub_password }} - - uses: ./.github/actions/publish_images - if: inputs.quay_io_organization != '' && inputs.quay_io_login != '' - with: - registry: quay.io - organization: ${{ inputs.quay_io_organization }} - login: ${{ inputs.quay_io_login }} - password: ${{ inputs.quay_io_password }} diff --git a/.github/actions/publish_release/action.yml b/.github/actions/publish_release/action.yml deleted file mode 100644 index 1b6a0e302..000000000 --- a/.github/actions/publish_release/action.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Publish image -inputs: - docker_hub_organization: - type: string - description: DockerHub organization - default: prom - docker_hub_login: - type: string - description: DockerHub username - docker_hub_password: - type: string - description: DockerHub password - quay_io_organization: - type: string - description: Quay.io organization - default: prometheus - quay_io_login: - type: string - description: Quay.io username - quay_io_password: - type: string - description: Quay.io password - github_token: - type: string - description: Github Token -runs: - using: composite - steps: - - uses: ./.github/actions/setup_environment - with: - enable_docker_multibuild: true - - uses: ./.github/actions/restore_artifacts - - run: ~/go/bin/promu crossbuild tarballs - shell: bash - - run: ~/go/bin/promu checksum .tarballs - shell: bash - - run: ~/go/bin/promu 
release .tarballs - shell: bash - env: - GITHUB_TOKEN: ${{ inputs.github_token }} - - uses: ./.github/actions/publish_release_images - if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != '' - with: - registry: docker.io - organization: ${{ inputs.docker_hub_organization }} - login: ${{ inputs.docker_hub_login }} - password: ${{ inputs.docker_hub_password }} - - uses: ./.github/actions/publish_release_images - if: inputs.quay_io_organization != '' && inputs.quay_io_login != '' - with: - registry: quay.io - organization: ${{ inputs.quay_io_organization }} - login: ${{ inputs.quay_io_login }} - password: ${{ inputs.quay_io_password }} diff --git a/.github/actions/publish_release_images/action.yml b/.github/actions/publish_release_images/action.yml deleted file mode 100644 index 3b6132894..000000000 --- a/.github/actions/publish_release_images/action.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Publish release image -inputs: - registry: - type: string - description: Docker registry - organization: - type: string - description: Organization - login: - type: string - description: Username - password: - type: string - description: Password - dockerfile_path: - description: Path to Dockerfile - type: string - default: "" - dockerbuild_context: - description: Path to Dockerbuild context - type: string - default: "" - container_image_name: - description: Name of the container image - type: string - default: "" -runs: - using: composite - steps: - - if: inputs.dockerfile_path != '' - run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.container_image_name != '' - run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.dockerbuild_context != '' - run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile - shell: bash - - run: | - current_tag=${GITHUB_REF#refs/*/} - touch /tmp/tmp-profile - . /tmp/tmp-profile - make docker DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - docker images - echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }} - make docker-publish DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - if [[ "$current_tag" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then - make docker-tag-latest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-publish DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - fi - shell: bash diff --git a/.github/actions/restore_artifacts/action.yml b/.github/actions/restore_artifacts/action.yml deleted file mode 100644 index a2d1b625e..000000000 --- a/.github/actions/restore_artifacts/action.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Restore artifacts created by save_artifacts. -# Tar is used because the default actions do not preserve directory structure -# and file mode. 
-name: Restore artifacts -runs: - using: composite - steps: - - name: Download all workflow run artifacts - uses: actions/download-artifact@v3 - with: - name: artifact - path: .artifacts - - run: | - for tar in .artifacts/*.tar - do - tar xvf $tar - done - rm -v .artifacts/*.tar - shell: bash diff --git a/.github/actions/save_artifacts/action.yml b/.github/actions/save_artifacts/action.yml deleted file mode 100644 index e0b0440d0..000000000 --- a/.github/actions/save_artifacts/action.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Tar is used because the default actions do not preserve directory structure -# and file mode. -name: Save artifacts -inputs: - directory: - type: string - description: Path of the directory to save -runs: - using: composite - steps: - - run: | - tar cvf artifact.tar ${{ inputs.directory }} - mv artifact.tar artifact-$(sha1sum artifact.tar|awk '{ print $1 }').tar - shell: bash - - uses: actions/upload-artifact@v3 - with: - path: artifact-*.tar diff --git a/.github/actions/setup_environment/action.yml b/.github/actions/setup_environment/action.yml deleted file mode 100644 index 8c1d4d58e..000000000 --- a/.github/actions/setup_environment/action.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Setup environment -inputs: - enable_go: - type: boolean - description: Whether to enable go specific features, such as caching. - default: true - enable_npm: - type: boolean - description: Whether to enable npm specific features, such as caching. - default: false - enable_docker_multibuild: - type: boolean - description: Whether to enable multibuild docker - default: false -runs: - using: composite - steps: - - uses: actions/cache@v3 - if: inputs.enable_go == 'true' - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - uses: actions/cache@v3 - if: inputs.enable_npm == 'true' - with: - path: | - ~/.npm - key: ${{ runner.os }}-npm-${{ hashFiles('web/ui/package-lock.json') }} - restore-keys: | - ${{ runner.os }}-npm- - - run: make promu - shell: bash - if: inputs.enable_go == 'true' - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - if: inputs.enable_docker_multibuild == 'true' - - name: Set up buildx - uses: docker/setup-buildx-action@v1 - if: inputs.enable_docker_multibuild == 'true' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26a9c8b8b..98c771e86 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,13 +13,14 @@ jobs: image: quay.io/prometheus/golang-builder:1.19-base steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/setup_environment + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/setup_environment - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test ./tsdb/ -test.tsdb-isolation=false - run: GOARCH=386 go test ./cmd/prometheus - run: make -C documentation/examples/remote_storage - run: make -C documentation/examples - - uses: ./.github/actions/check_proto + - uses: ./.github/promci/actions/check_proto with: version: "3.15.8" @@ -33,14 +34,15 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/setup_environment + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/setup_environment with: enable_go: false enable_npm: true - run: make assets-tarball - run: make ui-lint - run: make ui-test - - uses: ./.github/actions/save_artifacts + - uses: ./.github/promci/actions/save_artifacts with: directory: .tarballs @@ -101,7 +103,8 @@ jobs: thread: [ 0, 1, 2 ] steps: - 
uses: actions/checkout@v3 - - uses: ./.github/actions/build + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" parallelism: 3 @@ -123,7 +126,8 @@ jobs: # should also be updated. steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/build + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/build with: parallelism: 12 thread: ${{ matrix.thread }} @@ -157,7 +161,8 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/publish_main + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} docker_hub_password: ${{ secrets.docker_hub_password }} @@ -170,7 +175,8 @@ jobs: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/publish_release + - uses: prometheus/promci@v0.0.2 + - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login }} docker_hub_password: ${{ secrets.docker_hub_password }} @@ -184,6 +190,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 + - uses: prometheus/promci@v0.0.2 - name: Install nodejs uses: actions/setup-node@v3 with: From 32e9f6a39c2f842e4855e80bf20944925a810291 Mon Sep 17 00:00:00 2001 From: Ben Whetstone Date: Wed, 11 Jan 2023 11:19:08 -0500 Subject: [PATCH 2/9] Add container ID as a meta label for pod targets Signed-off-by: Ben Whetstone --- discovery/kubernetes/pod.go | 27 ++++++++++++++++++++++++++ discovery/kubernetes/pod_test.go | 33 ++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 2e3687a06..396720c22 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -183,6 +183,7 @@ const ( podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" + podContainerIDLabel = metaLabelPrefix + "pod_container_id" podContainerImageLabel = metaLabelPrefix + "pod_container_image" podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number" @@ -248,6 +249,24 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { return ls } +func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containerName string) (*apiv1.ContainerStatus, error) { + for _, s := range *statuses { + if s.Name == containerName { + return &s, nil + } + } + return nil, fmt.Errorf("cannot find container with name %v", containerName) +} + +func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { + cStatus, err := p.findPodContainerStatus(statuses, containerName) + if err != nil { + level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + return "" + } + return cStatus.ContainerID +} + func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg := &targetgroup.Group{ Source: podSource(pod), @@ -267,6 +286,12 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { for i, c := range containers { isInit := i >= len(pod.Spec.Containers) + cStatuses := &pod.Status.ContainerStatuses + if isInit { + cStatuses = &pod.Status.InitContainerStatuses + } + cID := p.findPodContainerID(cStatuses, 
c.Name) + // If no ports are defined for the container, create an anonymous // target per container. if len(c.Ports) == 0 { @@ -275,6 +300,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(pod.Status.PodIP), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerIsInit: lv(strconv.FormatBool(isInit)), }) @@ -288,6 +314,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(addr), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerPortNumberLabel: lv(ports), podContainerPortNameLabel: lv(port.Name), diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index f2b79dbb8..286a1a230 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -81,6 +81,16 @@ func makeMultiPortPods() *v1.Pod { Status: v1.ConditionTrue, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer0", + ContainerID: "docker://a1b2c3d4e5f6", + }, + { + Name: "testcontainer1", + ContainerID: "containerd://6f5e4d3c2b1a", + }, + }, }, } } @@ -118,6 +128,12 @@ func makePods() *v1.Pod { Status: v1.ConditionTrue, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer", + ContainerID: "docker://a1b2c3d4e5f6", + }, + }, }, } } @@ -162,6 +178,18 @@ func makeInitContainerPods() *v1.Pod { Status: v1.ConditionFalse, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer", + ContainerID: "docker://a1b2c3d4e5f6", + }, + }, + InitContainerStatuses: []v1.ContainerStatus{ + { + Name: "initcontainer", + ContainerID: "containerd://6f5e4d3c2b1a", + }, + }, }, } } @@ -179,6 +207,7 @@ func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, }, Labels: model.LabelSet{ @@ -230,6 +259,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4:9001", @@ -239,12 +269,14 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "UDP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4", "__meta_kubernetes_pod_container_name": "testcontainer1", "__meta_kubernetes_pod_container_image": "testcontainer1:latest", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }, }, Labels: model.LabelSet{ @@ -280,6 +312,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) { "__meta_kubernetes_pod_container_name": "initcontainer", "__meta_kubernetes_pod_container_image": "initcontainer:latest", "__meta_kubernetes_pod_container_init": "true", + "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }) expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending" expected[key].Labels["__meta_kubernetes_pod_ready"] = 
"false" From 52d5a7c60f4e04714a6ae97bd9874dd007636a9c Mon Sep 17 00:00:00 2001 From: Ben Whetstone Date: Tue, 17 Jan 2023 11:15:52 -0500 Subject: [PATCH 3/9] Document the __meta_kubernetes_pod_container_id meta label Signed-off-by: Ben Whetstone --- docs/configuration/configuration.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 6ac1454ef..5f70f2b85 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1856,6 +1856,7 @@ Available meta labels: * `__meta_kubernetes_pod_annotationpresent_`: `true` for each annotation from the pod object. * `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) * `__meta_kubernetes_pod_container_name`: Name of the container the target address points to. +* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `://`. * `__meta_kubernetes_pod_container_image`: The image the container is using. * `__meta_kubernetes_pod_container_port_name`: Name of the container port. * `__meta_kubernetes_pod_container_port_number`: Number of the container port. From 9ae3572d2456a21126f89cf883039e4f3b288d4a Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 19 Jan 2023 15:52:29 +0000 Subject: [PATCH 4/9] TestConcurrentRangeQueries: log query with error We've seen some timeouts in CI, and wanted to know what queries are involved. Signed-off-by: Bryan Boreham --- promql/promql_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/promql/promql_test.go b/promql/promql_test.go index 5bdeac1d1..1e6535141 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -87,6 +87,7 @@ func TestConcurrentRangeQueries(t *testing.T) { } res := qry.Exec(context.Background()) if res.Err != nil { + t.Logf("Query: %q, steps: %d, result: %s", c.expr, c.steps, res.Err) return res.Err } qry.Close() From 138a1362d8ffc8c91685e86b008a0e210350e0f9 Mon Sep 17 00:00:00 2001 From: Peter Nicholson Date: Thu, 19 Jan 2023 17:56:53 +0100 Subject: [PATCH 5/9] Add support for EndpointSlice conditions Signed-off-by: Peter Nicholson --- discovery/kubernetes/endpointslice.go | 10 + discovery/kubernetes/endpointslice_adaptor.go | 18 + .../kubernetes/endpointslice_adaptor_test.go | 4 + discovery/kubernetes/endpointslice_test.go | 310 +++++++++++------- 4 files changed, 225 insertions(+), 117 deletions(-) diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 0d9c5a25b..135735154 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -259,6 +259,8 @@ const ( endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port" endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol" endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready" + endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving" + endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating" endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname" endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind" endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name" @@ -313,6 +315,14 @@ func (e *EndpointSlice) 
buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) } + if ep.conditions().serving() != nil { + target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + } + + if ep.conditions().terminating() != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + } + if ep.hostname() != nil { target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index 87484b06f..5a21f1b89 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -49,6 +49,8 @@ type endpointSliceEndpointAdaptor interface { type endpointSliceEndpointConditionsAdaptor interface { ready() *bool + serving() *bool + terminating() *bool } // Adaptor for k8s.io/api/discovery/v1 @@ -193,6 +195,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSliceEndpointAdaptorV1beta1 struct { endpoint v1beta1.Endpoint } @@ -237,6 +247,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index 9a2a00312..e56491093 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -35,6 +35,8 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) require.Equal(t, endpointSlice.Endpoints[i].DeprecatedTopology, endpointAdaptor.topology()) } @@ -61,6 +63,8 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) 
require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology()) } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index a0ae543fc..f4076b943 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -64,23 +64,31 @@ func makeEndpointSliceV1() *v1.EndpointSlice { }, Endpoints: []v1.Endpoint{ { - Addresses: []string{"1.2.3.4"}, - Conditions: v1.EndpointConditions{Ready: boolptr(true)}, - Hostname: strptr("testendpoint1"), - TargetRef: &corev1.ObjectReference{}, - NodeName: strptr("foobar"), + Addresses: []string{"1.2.3.4"}, + Conditions: v1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + Hostname: strptr("testendpoint1"), + TargetRef: &corev1.ObjectReference{}, + NodeName: strptr("foobar"), DeprecatedTopology: map[string]string{ "topology": "value", }, }, { Addresses: []string{"2.3.4.5"}, Conditions: v1.EndpointConditions{ - Ready: boolptr(true), + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), }, }, { Addresses: []string{"3.4.5.6"}, Conditions: v1.EndpointConditions{ - Ready: boolptr(false), + Ready: boolptr(false), + Serving: boolptr(true), + Terminating: boolptr(true), }, }, }, @@ -111,12 +119,16 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { }, { Addresses: []string{"2.3.4.5"}, Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), }, }, { Addresses: []string{"3.4.5.6"}, Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(false), + Ready: boolptr(false), + Serving: boolptr(true), + Terminating: boolptr(true), }, }, }, @@ -141,6 +153,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -151,19 +165,23 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - 
"__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -199,17 +217,21 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -367,6 +389,8 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -377,19 +401,23 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - 
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: map[model.LabelName]model.LabelValue{ @@ -445,6 +473,8 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -455,19 +485,23 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -512,6 +546,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", 
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -522,19 +558,23 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -574,6 +614,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -584,19 +626,23 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - 
"__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -652,6 +698,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -662,19 +710,23 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, }, Labels: model.LabelSet{ @@ -719,6 +771,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", 
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -732,19 +786,23 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -792,6 +850,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -805,19 +865,23 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - 
"__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -913,6 +977,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -923,19 +989,23 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, }, Labels: model.LabelSet{ @@ -1039,6 +1109,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", 
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -1049,19 +1121,23 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, }, Labels: model.LabelSet{ From bba95df0e9ef1bbf41e245531e317f906efb437a Mon Sep 17 00:00:00 2001 From: Peter Nicholson Date: Thu, 19 Jan 2023 18:58:17 +0100 Subject: [PATCH 6/9] Update documentation Signed-off-by: Peter Nicholson --- docs/configuration/configuration.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 9df87f9c6..be810541d 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1910,6 +1910,8 @@ Available meta labels: * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object. * `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the address of the target. * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_serving`: Set to `true` or `false` for the referenced endpoint's serving state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_terminating`: Set to `true` or `false` for the referenced endpoint's terminating state. * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. 
From 7063b47abbd61cd712e837d83c518e1eab9b5cb4 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 19 Jan 2023 19:21:28 +0100 Subject: [PATCH 7/9] lint(yaml) : simplify ignore path for all github workflows Signed-off-by: Matthieu MOREL --- .yamllint | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.yamllint b/.yamllint index 3878a31d3..19552574b 100644 --- a/.yamllint +++ b/.yamllint @@ -21,8 +21,4 @@ rules: line-length: disable truthy: ignore: | - .github/workflows/codeql-analysis.yml - .github/workflows/funcbench.yml - .github/workflows/fuzzing.yml - .github/workflows/prombench.yml - .github/workflows/golangci-lint.yml + .github/workflows/*.yml From 90d6873c7fa9ee8cfddc2b9de93b7ec8b68d27a0 Mon Sep 17 00:00:00 2001 From: Amin Borjian Date: Thu, 19 Jan 2023 23:33:53 +0330 Subject: [PATCH 8/9] promtool: add support of selecting timeseries for TSDB dump Dumping without any limit on the data being dumped will generate a large amount of data. Also, sometimes it is necessary to dump only a part of the data in order to change or transfer it. This change allows to specify a part of the data to dump and by default works same as before. (no public API change) Signed-off-by: Amin Borjian --- cmd/promtool/main.go | 3 ++- cmd/promtool/tsdb.go | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index a72183f70..2295e314f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -190,6 +190,7 @@ func main() { dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String() importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.") importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool() @@ -296,7 +297,7 @@ func main() { os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable))) case tsdbDumpCmd.FullCommand(): - os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime))) + os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch))) // TODO(aSquare14): Work on adding support for custom block size. 
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))

diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 36e33049d..0e0cdb863 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -30,6 +30,7 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/index"
@@ -624,7 +625,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 	return nil
 }
 
-func dumpSamples(path string, mint, maxt int64) (err error) {
+func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	db, err := tsdb.OpenDBReadOnly(path, nil)
 	if err != nil {
 		return err
@@ -638,7 +639,11 @@ func dumpSamples(path string, mint, maxt int64) (err error) {
 	}
 	defer q.Close()
 
-	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	matchers, err := parser.ParseMetricSelector(match)
+	if err != nil {
+		return err
+	}
+	ss := q.Select(false, nil, matchers...)
 
 	for ss.Next() {
 		series := ss.At()

From 2d9a9cbc088f89848df84d6e5ea2db546b488c7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gy=C3=B6rgy=20Krajcsovits?=
Date: Tue, 24 Jan 2023 12:56:30 +0100
Subject: [PATCH 9/9] Fix storage/remote/codec ignoring histogram reset hint
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: György Krajcsovits
---
 storage/remote/codec.go      |  40 +++----
 storage/remote/codec_test.go | 196 +++++++++++++++++++++++++++++++++++
 2 files changed, 218 insertions(+), 18 deletions(-)

diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index 10f24efec..36bff2821 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -528,15 +528,16 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
 // represents an integer histogram and not a float histogram.
 func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
 	return &histogram.Histogram{
-		Schema:          hp.Schema,
-		ZeroThreshold:   hp.ZeroThreshold,
-		ZeroCount:       hp.GetZeroCountInt(),
-		Count:           hp.GetCountInt(),
-		Sum:             hp.Sum,
-		PositiveSpans:   spansProtoToSpans(hp.GetPositiveSpans()),
-		PositiveBuckets: hp.GetPositiveDeltas(),
-		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
-		NegativeBuckets: hp.GetNegativeDeltas(),
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        hp.GetZeroCountInt(),
+		Count:            hp.GetCountInt(),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  hp.GetPositiveDeltas(),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  hp.GetNegativeDeltas(),
 	}
 }
 
@@ -545,15 +546,16 @@ func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
 // the proto message represents an float histogram and not a integer histogram.
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram { return &histogram.FloatHistogram{ - Schema: hp.Schema, - ZeroThreshold: hp.ZeroThreshold, - ZeroCount: hp.GetZeroCountFloat(), - Count: hp.GetCountFloat(), - Sum: hp.Sum, - PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), - PositiveBuckets: hp.GetPositiveCounts(), - NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), - NegativeBuckets: hp.GetNegativeCounts(), + CounterResetHint: histogram.CounterResetHint(hp.ResetHint), + Schema: hp.Schema, + ZeroThreshold: hp.ZeroThreshold, + ZeroCount: hp.GetZeroCountFloat(), + Count: hp.GetCountFloat(), + Sum: hp.Sum, + PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()), + PositiveBuckets: hp.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()), + NegativeBuckets: hp.GetNegativeCounts(), } } @@ -577,6 +579,7 @@ func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.H NegativeDeltas: h.NegativeBuckets, PositiveSpans: spansToSpansProto(h.PositiveSpans), PositiveDeltas: h.PositiveBuckets, + ResetHint: prompb.Histogram_ResetHint(h.CounterResetHint), Timestamp: timestamp, } } @@ -592,6 +595,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra NegativeCounts: fh.NegativeBuckets, PositiveSpans: spansToSpansProto(fh.PositiveSpans), PositiveCounts: fh.PositiveBuckets, + ResetHint: prompb.Histogram_ResetHint(fh.CounterResetHint), Timestamp: timestamp, } } diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index bf3954c4d..f19259b9f 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -371,6 +371,202 @@ func TestNilHistogramProto(t *testing.T) { HistogramProtoToFloatHistogram(prompb.Histogram{}) } +func exampleHistogram() histogram.Histogram { + return histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 19, + Sum: 2.7, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + } +} + +func exampleHistogramProto() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 19}, + Sum: 2.7, + Schema: 0, + ZeroThreshold: 0, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + NegativeSpans: []*prompb.BucketSpan{ + { + Offset: 0, + Length: 5, + }, + { + Offset: 1, + Length: 0, + }, + { + Offset: 0, + Length: 1, + }, + }, + NegativeDeltas: []int64{1, 2, -2, 1, -1, 0}, + PositiveSpans: []*prompb.BucketSpan{ + { + Offset: 0, + Length: 4, + }, + { + Offset: 0, + Length: 0, + }, + { + Offset: 0, + Length: 3, + }, + }, + PositiveDeltas: []int64{1, 2, -2, 1, -1, 0, 0}, + ResetHint: prompb.Histogram_GAUGE, + Timestamp: 1337, + } +} + +func TestHistogramToProtoConvert(t *testing.T) { + tests := []struct { + input histogram.CounterResetHint + expected prompb.Histogram_ResetHint + }{ + { + input: histogram.UnknownCounterReset, + expected: prompb.Histogram_UNKNOWN, + }, + { + input: histogram.CounterReset, + expected: prompb.Histogram_YES, + }, + { + input: histogram.NotCounterReset, + expected: prompb.Histogram_NO, + }, + { + input: histogram.GaugeType, + expected: prompb.Histogram_GAUGE, + }, + } + + for _, test := range tests { + h := exampleHistogram() + h.CounterResetHint = test.input + p := 
exampleHistogramProto() + p.ResetHint = test.expected + + require.Equal(t, p, HistogramToHistogramProto(1337, &h)) + + require.Equal(t, h, *HistogramProtoToHistogram(p)) + } +} + +func exampleFloatHistogram() histogram.FloatHistogram { + return histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 19, + Sum: 2.7, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []float64{1, 2, -2, 1, -1, 0}, + } +} + +func exampleFloatHistogramProto() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountFloat{CountFloat: 19}, + Sum: 2.7, + Schema: 0, + ZeroThreshold: 0, + ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0}, + NegativeSpans: []*prompb.BucketSpan{ + { + Offset: 0, + Length: 5, + }, + { + Offset: 1, + Length: 0, + }, + { + Offset: 0, + Length: 1, + }, + }, + NegativeCounts: []float64{1, 2, -2, 1, -1, 0}, + PositiveSpans: []*prompb.BucketSpan{ + { + Offset: 0, + Length: 4, + }, + { + Offset: 0, + Length: 0, + }, + { + Offset: 0, + Length: 3, + }, + }, + PositiveCounts: []float64{1, 2, -2, 1, -1, 0, 0}, + ResetHint: prompb.Histogram_GAUGE, + Timestamp: 1337, + } +} + +func TestFloatHistogramToProtoConvert(t *testing.T) { + tests := []struct { + input histogram.CounterResetHint + expected prompb.Histogram_ResetHint + }{ + { + input: histogram.UnknownCounterReset, + expected: prompb.Histogram_UNKNOWN, + }, + { + input: histogram.CounterReset, + expected: prompb.Histogram_YES, + }, + { + input: histogram.NotCounterReset, + expected: prompb.Histogram_NO, + }, + { + input: histogram.GaugeType, + expected: prompb.Histogram_GAUGE, + }, + } + + for _, test := range tests { + h := exampleFloatHistogram() + h.CounterResetHint = test.input + p := exampleFloatHistogramProto() + p.ResetHint = test.expected + + require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h)) + + require.Equal(t, h, *HistogramProtoToFloatHistogram(p)) + } +} + func TestStreamResponse(t *testing.T) { lbs1 := labelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil) lbs2 := labelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)