Merge remote-tracking branch 'prometheus/main' into arve/sync-upstream

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>

Commit: a3ee0990dd
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 changes)
@@ -1,4 +1,8 @@
 <!--
+Please give your PR a title in the form "area: short description". For example "tsdb: reduce disk usage by 95%"
+
+If your PR is to fix an issue, put "Fixes #issue-number" in the description.
+
 Don't forget!

 - Please sign CNCF's Developer Certificate of Origin and sign-off your commits by adding the -s / --signoff flag to `git commit`. See https://github.com/apps/dco for more information.

@@ -7,8 +11,6 @@

 - Where possible use only exported APIs for tests to simplify the review and make it as close as possible to an actual library usage.

 - No tests are needed for internal implementation changes.

 - Performance improvements would need a benchmark test to prove it.

 - All exposed objects should have a comment.

@@ -16,6 +16,7 @@ package main
 import (
 	"bufio"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"os"

@@ -643,10 +644,15 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.

 	for _, chk := range chks {
 		// Load the actual data of the chunk.
-		chk, err := chunkr.Chunk(chk)
+		chk, iterable, err := chunkr.ChunkOrIterable(chk)
 		if err != nil {
 			return err
 		}
+		// Chunks within blocks should not need to be re-written, so an
+		// iterable is not expected to be returned from the chunk reader.
+		if iterable != nil {
+			return errors.New("ChunkOrIterable should not return an iterable when reading a block")
+		}
 		switch chk.Encoding() {
 		case chunkenc.EncXOR:
 			floatChunkSamplesCount = append(floatChunkSamplesCount, chk.NumSamples())

@@ -37,7 +37,7 @@ Generic placeholders are defined as follows:
 * `<float>`: a floating-point number
 * `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
 * `<int>`: an integer value
-* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`
+* `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`. Any other unsupported character in the source label should be converted to an underscore. For example, the label `app.kubernetes.io/name` should be written as `app_kubernetes_io_name`.
 * `<labelvalue>`: a string of unicode characters
 * `<path>`: a valid URL path
 * `<scheme>`: a string that can take the values `http` or `https`
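
A minimal sketch of the conversion rule described above, assuming a simple regexp replacement (illustrative only, not Prometheus's exact code; it also ignores the special case of a leading digit):

    package main

    import (
        "fmt"
        "regexp"
    )

    // invalidLabelChar matches every character that is not allowed in a
    // Prometheus label name ([a-zA-Z_][a-zA-Z0-9_]*).
    var invalidLabelChar = regexp.MustCompile(`[^a-zA-Z0-9_]`)

    // sanitizeLabelName converts unsupported characters to underscores,
    // e.g. "app.kubernetes.io/name" -> "app_kubernetes_io_name".
    func sanitizeLabelName(name string) string {
        return invalidLabelChar.ReplaceAllString(name, "_")
    }

    func main() {
        fmt.Println(sanitizeLabelName("app.kubernetes.io/name"))
    }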
@@ -848,12 +848,12 @@ Available meta labels:
 * `__meta_docker_container_id`: the id of the container
 * `__meta_docker_container_name`: the name of the container
 * `__meta_docker_container_network_mode`: the network mode of the container
-* `__meta_docker_container_label_<labelname>`: each label of the container
+* `__meta_docker_container_label_<labelname>`: each label of the container, with any unsupported characters converted to an underscore
 * `__meta_docker_network_id`: the ID of the network
 * `__meta_docker_network_name`: the name of the network
 * `__meta_docker_network_ingress`: whether the network is ingress
 * `__meta_docker_network_internal`: whether the network is internal
-* `__meta_docker_network_label_<labelname>`: each label of the network
+* `__meta_docker_network_label_<labelname>`: each label of the network, with any unsupported characters converted to an underscore
 * `__meta_docker_network_scope`: the scope of the network
 * `__meta_docker_network_ip`: the IP of the container in this network
 * `__meta_docker_port_private`: the port on the container

@@ -964,7 +964,7 @@ Available meta labels:
 * `__meta_dockerswarm_service_mode`: the mode of the service
 * `__meta_dockerswarm_service_endpoint_port_name`: the name of the endpoint port, if available
 * `__meta_dockerswarm_service_endpoint_port_publish_mode`: the publish mode of the endpoint port
-* `__meta_dockerswarm_service_label_<labelname>`: each label of the service
+* `__meta_dockerswarm_service_label_<labelname>`: each label of the service, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_service_task_container_hostname`: the container hostname of the target, if available
 * `__meta_dockerswarm_service_task_container_image`: the container image of the target
 * `__meta_dockerswarm_service_updating_status`: the status of the service, if available

@@ -972,7 +972,7 @@ Available meta labels:
 * `__meta_dockerswarm_network_name`: the name of the network
 * `__meta_dockerswarm_network_ingress`: whether the network is ingress
 * `__meta_dockerswarm_network_internal`: whether the network is internal
-* `__meta_dockerswarm_network_label_<labelname>`: each label of the network
+* `__meta_dockerswarm_network_label_<labelname>`: each label of the network, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_network_scope`: the scope of the network

 #### `tasks`

@@ -984,7 +984,7 @@ created using the `port` parameter defined in the SD configuration.

 Available meta labels:

-* `__meta_dockerswarm_container_label_<labelname>`: each label of the container
+* `__meta_dockerswarm_container_label_<labelname>`: each label of the container, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_task_id`: the id of the task
 * `__meta_dockerswarm_task_container_id`: the container id of the task
 * `__meta_dockerswarm_task_desired_state`: the desired state of the task

@@ -994,19 +994,19 @@ Available meta labels:
 * `__meta_dockerswarm_service_id`: the id of the service
 * `__meta_dockerswarm_service_name`: the name of the service
 * `__meta_dockerswarm_service_mode`: the mode of the service
-* `__meta_dockerswarm_service_label_<labelname>`: each label of the service
+* `__meta_dockerswarm_service_label_<labelname>`: each label of the service, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_network_id`: the ID of the network
 * `__meta_dockerswarm_network_name`: the name of the network
 * `__meta_dockerswarm_network_ingress`: whether the network is ingress
 * `__meta_dockerswarm_network_internal`: whether the network is internal
-* `__meta_dockerswarm_network_label_<labelname>`: each label of the network
-* `__meta_dockerswarm_network_label`: each label of the network
+* `__meta_dockerswarm_network_label_<labelname>`: each label of the network, with any unsupported characters converted to an underscore
+* `__meta_dockerswarm_network_label`: each label of the network, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_network_scope`: the scope of the network
 * `__meta_dockerswarm_node_id`: the ID of the node
 * `__meta_dockerswarm_node_hostname`: the hostname of the node
 * `__meta_dockerswarm_node_address`: the address of the node
 * `__meta_dockerswarm_node_availability`: the availability of the node
-* `__meta_dockerswarm_node_label_<labelname>`: each label of the node
+* `__meta_dockerswarm_node_label_<labelname>`: each label of the node, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_node_platform_architecture`: the architecture of the node
 * `__meta_dockerswarm_node_platform_os`: the operating system of the node
 * `__meta_dockerswarm_node_role`: the role of the node

@@ -1026,7 +1026,7 @@ Available meta labels:
 * `__meta_dockerswarm_node_engine_version`: the version of the node engine
 * `__meta_dockerswarm_node_hostname`: the hostname of the node
 * `__meta_dockerswarm_node_id`: the ID of the node
-* `__meta_dockerswarm_node_label_<labelname>`: each label of the node
+* `__meta_dockerswarm_node_label_<labelname>`: each label of the node, with any unsupported characters converted to an underscore
 * `__meta_dockerswarm_node_manager_address`: the address of the manager component of the node
 * `__meta_dockerswarm_node_manager_leader`: the leadership status of the manager component of the node (true or false)
 * `__meta_dockerswarm_node_manager_reachability`: the reachability of the manager component of the node

@@ -1615,7 +1615,7 @@ The following meta labels are available on targets during [relabeling](#relabel_

 * `__meta_gce_instance_id`: the numeric id of the instance
 * `__meta_gce_instance_name`: the name of the instance
-* `__meta_gce_label_<labelname>`: each GCE label of the instance
+* `__meta_gce_label_<labelname>`: each GCE label of the instance, with any unsupported characters converted to an underscore
 * `__meta_gce_machine_type`: full or partial URL of the machine type of the instance
 * `__meta_gce_metadata_<name>`: each metadata item of the instance
 * `__meta_gce_network`: the network URL of the instance

@@ -1699,8 +1699,8 @@ The labels below are only available for targets with `role` set to `hcloud`:
 * `__meta_hetzner_hcloud_memory_size_gb`: the amount of memory of the server (in GB)
 * `__meta_hetzner_hcloud_disk_size_gb`: the disk size of the server (in GB)
 * `__meta_hetzner_hcloud_private_ipv4_<networkname>`: the private ipv4 address of the server within a given network
-* `__meta_hetzner_hcloud_label_<labelname>`: each label of the server
-* `__meta_hetzner_hcloud_labelpresent_<labelname>`: `true` for each label of the server
+* `__meta_hetzner_hcloud_label_<labelname>`: each label of the server, with any unsupported characters converted to an underscore
+* `__meta_hetzner_hcloud_labelpresent_<labelname>`: `true` for each label of the server, with any unsupported characters converted to an underscore

 The labels below are only available for targets with `role` set to `robot`:

@@ -1967,8 +1967,8 @@ Available meta labels:

 * `__meta_kubernetes_node_name`: The name of the node object.
 * `__meta_kubernetes_node_provider_id`: The cloud provider's name for the node object.
-* `__meta_kubernetes_node_label_<labelname>`: Each label from the node object.
-* `__meta_kubernetes_node_labelpresent_<labelname>`: `true` for each label from the node object.
+* `__meta_kubernetes_node_label_<labelname>`: Each label from the node object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_node_labelpresent_<labelname>`: `true` for each label from the node object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_node_annotation_<annotationname>`: Each annotation from the node object.
 * `__meta_kubernetes_node_annotationpresent_<annotationname>`: `true` for each annotation from the node object.
 * `__meta_kubernetes_node_address_<address_type>`: The first address for each node address type, if it exists.

@@ -1991,8 +1991,8 @@ Available meta labels:
 * `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. (Does not apply to services of type ExternalName)
 * `__meta_kubernetes_service_loadbalancer_ip`: The IP address of the loadbalancer. (Applies to services of type LoadBalancer)
 * `__meta_kubernetes_service_external_name`: The DNS name of the service. (Applies to services of type ExternalName)
-* `__meta_kubernetes_service_label_<labelname>`: Each label from the service object.
-* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object.
+* `__meta_kubernetes_service_label_<labelname>`: Each label from the service object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_service_name`: The name of the service object.
 * `__meta_kubernetes_service_port_name`: Name of the service port for the target.
 * `__meta_kubernetes_service_port_number`: Number of the service port for the target.

@@ -2010,8 +2010,8 @@ Available meta labels:
 * `__meta_kubernetes_namespace`: The namespace of the pod object.
 * `__meta_kubernetes_pod_name`: The name of the pod object.
 * `__meta_kubernetes_pod_ip`: The pod IP of the pod object.
-* `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object.
+* `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the pod object.
 * `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
 * `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)

@@ -2040,8 +2040,8 @@ Available meta labels:

 * `__meta_kubernetes_namespace`: The namespace of the endpoints object.
 * `__meta_kubernetes_endpoints_name`: The names of the endpoints object.
-* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the endpoints object.
-* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label from the endpoints object.
+* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the endpoints object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label from the endpoints object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_endpoints_annotation_<annotationname>`: Each annotation from the endpoints object.
 * `__meta_kubernetes_endpoints_annotationpresent_<annotationname>`: `true` for each annotation from the endpoints object.
 * For all targets discovered directly from the endpoints list (those not additionally inferred

@@ -2066,8 +2066,8 @@ Available meta labels:

 * `__meta_kubernetes_namespace`: The namespace of the endpoints object.
 * `__meta_kubernetes_endpointslice_name`: The name of endpointslice object.
-* `__meta_kubernetes_endpointslice_label_<labelname>`: Each label from the endpointslice object.
-* `__meta_kubernetes_endpointslice_labelpresent_<labelname>`: `true` for each label from the endpointslice object.
+* `__meta_kubernetes_endpointslice_label_<labelname>`: Each label from the endpointslice object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_endpointslice_labelpresent_<labelname>`: `true` for each label from the endpointslice object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_endpointslice_annotation_<annotationname>`: Each annotation from the endpointslice object.
 * `__meta_kubernetes_endpointslice_annotationpresent_<annotationname>`: `true` for each annotation from the endpointslice object.
 * For all targets discovered directly from the endpointslice list (those not additionally inferred

@@ -2096,8 +2096,8 @@ Available meta labels:

 * `__meta_kubernetes_namespace`: The namespace of the ingress object.
 * `__meta_kubernetes_ingress_name`: The name of the ingress object.
-* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress object.
-* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label from the ingress object.
+* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress object, with any unsupported characters converted to an underscore.
+* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label from the ingress object, with any unsupported characters converted to an underscore.
 * `__meta_kubernetes_ingress_annotation_<annotationname>`: Each annotation from the ingress object.
 * `__meta_kubernetes_ingress_annotationpresent_<annotationname>`: `true` for each annotation from the ingress object.
 * `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if present.

@@ -2487,9 +2487,9 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_marathon_app`: the name of the app (with slashes replaced by dashes)
 * `__meta_marathon_image`: the name of the Docker image used (if available)
 * `__meta_marathon_task`: the ID of the Mesos task
-* `__meta_marathon_app_label_<labelname>`: any Marathon labels attached to the app
-* `__meta_marathon_port_definition_label_<labelname>`: the port definition labels
-* `__meta_marathon_port_mapping_label_<labelname>`: the port mapping labels
+* `__meta_marathon_app_label_<labelname>`: any Marathon labels attached to the app, with any unsupported characters converted to an underscore
+* `__meta_marathon_port_definition_label_<labelname>`: the port definition labels, with any unsupported characters converted to an underscore
+* `__meta_marathon_port_mapping_label_<labelname>`: the port mapping labels, with any unsupported characters converted to an underscore
 * `__meta_marathon_port_index`: the port index number (e.g. `1` for `PORT1`)

 See below for the configuration options for Marathon discovery:

@@ -268,12 +268,23 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
 	h.Count += other.Count
 	h.Sum += other.Sum

-	if other.Schema != h.Schema {
-		other = other.ReduceResolution(h.Schema)
+	var (
+		otherPositiveSpans   = other.PositiveSpans
+		otherPositiveBuckets = other.PositiveBuckets
+		otherNegativeSpans   = other.NegativeSpans
+		otherNegativeBuckets = other.NegativeBuckets
+	)
+
+	if other.Schema < h.Schema {
+		panic(fmt.Errorf("cannot add histogram with schema %d to %d", other.Schema, h.Schema))
+	} else if other.Schema > h.Schema {
+		otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false)
+		otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false)
 	}

-	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets)
-	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets)
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets)

 	return h
 }

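The new code path only ever reduces the resolution of `other`, never raises `h`'s: in the exponential bucketing scheme, a histogram at schema n+1 has twice as many buckets per power of two as one at schema n, so going down one schema step collapses each adjacent bucket pair into one. A minimal dense-slice sketch of that pair-merging idea (hypothetical helper; the real reduceResolution works on span/bucket pairs and handles index alignment):

    package main

    import "fmt"

    // mergePairs halves the resolution of a dense bucket slice by one schema
    // step by summing each pair of adjacent buckets. Simplified illustration,
    // not the Prometheus implementation.
    func mergePairs(buckets []float64) []float64 {
        out := make([]float64, 0, (len(buckets)+1)/2)
        for i := 0; i < len(buckets); i += 2 {
            sum := buckets[i]
            if i+1 < len(buckets) {
                sum += buckets[i+1]
            }
            out = append(out, sum)
        }
        return out
    }

    func main() {
        fmt.Println(mergePairs([]float64{1, 2, 3, 4})) // [3 7]
    }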
@@ -284,12 +295,23 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
 	h.Count -= other.Count
 	h.Sum -= other.Sum

-	if other.Schema != h.Schema {
-		other = other.ReduceResolution(h.Schema)
+	var (
+		otherPositiveSpans   = other.PositiveSpans
+		otherPositiveBuckets = other.PositiveBuckets
+		otherNegativeSpans   = other.NegativeSpans
+		otherNegativeBuckets = other.NegativeBuckets
+	)
+
+	if other.Schema < h.Schema {
+		panic(fmt.Errorf("cannot subtract histogram with schema %d to %d", other.Schema, h.Schema))
+	} else if other.Schema > h.Schema {
+		otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false)
+		otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false)
 	}

-	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, other.PositiveSpans, other.PositiveBuckets)
-	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, other.NegativeSpans, other.NegativeBuckets)
+	h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
+	h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets)

 	return h
 }

@@ -1110,6 +1132,10 @@ func floatBucketsMatch(b1, b2 []float64) bool {
 // ReduceResolution reduces the float histogram's spans and buckets to the target schema.
 // The target schema must be smaller than the current float histogram's schema.
 func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
 	h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false)
 	h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false)
 	h.Schema = targetSchema

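A short usage sketch of the guarded method above, assuming a FloatHistogram built elsewhere (only Schema and ReduceResolution from this diff are relied on):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        h := &histogram.FloatHistogram{Schema: 3} // empty buckets, for illustration
        h = h.ReduceResolution(1)                 // allowed: target 1 < current 3
        fmt.Println(h.Schema)                     // 1
        // Calling h.ReduceResolution(1) again would panic: the target schema
        // must now be strictly smaller than the current one.
    }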
@@ -1573,9 +1573,12 @@ func TestFloatHistogramAdd(t *testing.T) {

 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
+			in2Copy := c.in2.Copy()
 			require.Equal(t, c.expected, c.in1.Add(c.in2))
 			// Has it also happened in-place?
 			require.Equal(t, c.expected, c.in1)
+			// Check that the argument was not mutated.
+			require.Equal(t, in2Copy, c.in2)
 		})
 	}
 }

@@ -1659,9 +1662,12 @@ func TestFloatHistogramSub(t *testing.T) {

 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
+			in2Copy := c.in2.Copy()
 			require.Equal(t, c.expected, c.in1.Sub(c.in2))
 			// Has it also happened in-place?
 			require.Equal(t, c.expected, c.in1)
+			// Check that the argument was not mutated.
+			require.Equal(t, in2Copy, c.in2)
 		})
 	}
 }

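These tests guard against exactly the regression the Add/Sub rewrite fixes: mutating the right-hand argument. A generic, self-contained sketch of the snapshot-and-compare idiom used here (AppendZero is a hypothetical stand-in for the function under test):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // AppendZero is a stand-in function under test; it must not mutate its input.
    func AppendZero(xs []int) []int { return append(xs, 0) }

    func TestArgumentNotMutated(t *testing.T) {
        in := []int{1, 2, 3}
        inCopy := append([]int(nil), in...) // snapshot the argument first

        _ = AppendZero(in)

        // Fails if AppendZero wrote through the argument's backing array.
        require.Equal(t, inCopy, in)
    }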
@@ -497,6 +497,10 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
 // ReduceResolution reduces the histogram's spans and buckets to the target schema.
 // The target schema must be smaller than the current histogram's schema.
 func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
 	h.PositiveSpans, h.PositiveBuckets = reduceResolution(
 		h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true,
 	)

@@ -473,10 +473,10 @@ func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunke
 	return csi
 }

-func ChainSampleIteratorFromMetas(it chunkenc.Iterator, chunks []chunks.Meta) chunkenc.Iterator {
-	csi := getChainSampleIterator(it, len(chunks))
-	for i, c := range chunks {
-		csi.iterators[i] = c.Chunk.Iterator(csi.iterators[i])
+func ChainSampleIteratorFromIterables(it chunkenc.Iterator, iterables []chunkenc.Iterable) chunkenc.Iterator {
+	csi := getChainSampleIterator(it, len(iterables))
+	for i, c := range iterables {
+		csi.iterators[i] = c.Iterator(csi.iterators[i])
 	}
 	return csi
 }

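A sketch of driving the new constructor with two plain XOR chunks, which satisfy chunkenc.Iterable through their Iterator method per the tsdb/chunkenc change below (assumes a nil reuse iterator is acceptable, as elsewhere in the storage package):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/storage"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    // xorChunk builds an XOR chunk with one float sample per given timestamp.
    func xorChunk(ts ...int64) chunkenc.Chunk {
        c := chunkenc.NewXORChunk()
        app, _ := c.Appender()
        for _, t := range ts {
            app.Append(t, float64(t))
        }
        return c
    }

    func main() {
        // Two overlapping chunks; the chained iterator merges their samples
        // into a single timestamp-ordered stream.
        it := storage.ChainSampleIteratorFromIterables(nil, []chunkenc.Iterable{
            xorChunk(1, 3, 5),
            xorChunk(2, 4, 6),
        })
        for it.Next() == chunkenc.ValFloat {
            t, v := it.At()
            fmt.Println(t, v)
        }
    }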
@@ -129,8 +129,19 @@ type ChunkWriter interface {

 // ChunkReader provides reading access of serialized time series data.
 type ChunkReader interface {
-	// Chunk returns the series data chunk with the given reference.
-	Chunk(meta chunks.Meta) (chunkenc.Chunk, error)
+	// ChunkOrIterable returns the series data for the given chunks.Meta.
+	// Either a single chunk will be returned, or an iterable.
+	// A single chunk should be returned if chunks.Meta maps to a chunk that
+	// already exists and doesn't need modifications.
+	// An iterable should be returned if chunks.Meta maps to a subset of the
+	// samples in a stored chunk, or multiple chunks. (E.g. OOOHeadChunkReader
+	// could return an iterable where multiple histogram samples have counter
+	// resets. There can only be one counter reset per histogram chunk so
+	// multiple chunks would be created from the iterable in this case.)
+	// Only one of chunk or iterable should be returned. In some cases you may
+	// always expect a chunk to be returned. You can check that iterable is nil
+	// in those cases.
+	ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error)

 	// Close releases all underlying resources of the reader.
 	Close() error

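The two-valued return puts a small burden on every caller: on success, exactly one of chunk and iterable is non-nil. A sketch of the resulting caller pattern (iteratorFor is a hypothetical helper; it mirrors what the promtool and querier changes in this commit do):

    import (
        "github.com/prometheus/prometheus/tsdb"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
        "github.com/prometheus/prometheus/tsdb/chunks"
    )

    // iteratorFor hides the chunk-vs-iterable distinction from callers that
    // only want to read samples.
    func iteratorFor(cr tsdb.ChunkReader, meta chunks.Meta) (chunkenc.Iterator, error) {
        chk, iterable, err := cr.ChunkOrIterable(meta)
        if err != nil {
            return nil, err
        }
        if chk != nil {
            // A ready-made chunk that needs no re-write.
            return chk.Iterator(nil), nil
        }
        // Samples that may later be re-cut into several chunks.
        return iterable.Iterator(nil), nil
    }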
@@ -504,6 +504,19 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
 	return filepath.Join(dir, ulid.String())
 }

+func createBlockFromOOOHead(tb testing.TB, dir string, head *OOOCompactionHead) string {
+	compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil, true)
+	require.NoError(tb, err)
+
+	require.NoError(tb, os.MkdirAll(dir, 0o777))
+
+	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
+	// Because of this block intervals are always +1 than the total samples it includes.
+	ulid, err := compactor.Write(dir, head, head.MinTime(), head.MaxTime()+1, nil)
+	require.NoError(tb, err)
+	return filepath.Join(dir, ulid.String())
+}
+
 func createHead(tb testing.TB, w *wlog.WL, series []storage.Series, chunkDir string) *Head {
 	opts := DefaultHeadOptions()
 	opts.ChunkDirRoot = chunkDir

@@ -67,6 +67,8 @@ const (

 // Chunk holds a sequence of sample pairs that can be iterated over and appended to.
 type Chunk interface {
+	Iterable
+
 	// Bytes returns the underlying byte slice of the chunk.
 	Bytes() []byte

@@ -76,11 +78,6 @@ type Chunk interface {
 	// Appender returns an appender to append samples to the chunk.
 	Appender() (Appender, error)

-	// The iterator passed as argument is for re-use.
-	// Depending on implementation, the iterator can
-	// be re-used or a new iterator can be allocated.
-	Iterator(Iterator) Iterator
-
 	// NumSamples returns the number of samples in the chunk.
 	NumSamples() int

@@ -92,6 +89,13 @@ type Chunk interface {
 	Compact()
 }

+type Iterable interface {
+	// The iterator passed as argument is for re-use.
+	// Depending on implementation, the iterator can
+	// be re-used or a new iterator can be allocated.
+	Iterator(Iterator) Iterator
+}
+
 // Appender adds sample pairs to a chunk.
 type Appender interface {
 	Append(int64, float64)

@@ -184,6 +188,19 @@ func (v ValueType) ChunkEncoding() Encoding {
 	}
 }

+func (v ValueType) NewChunk() (Chunk, error) {
+	switch v {
+	case ValFloat:
+		return NewXORChunk(), nil
+	case ValHistogram:
+		return NewHistogramChunk(), nil
+	case ValFloatHistogram:
+		return NewFloatHistogramChunk(), nil
+	default:
+		return nil, fmt.Errorf("value type %v unsupported", v)
+	}
+}
+
 // MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values.
 func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
 	return &mockSeriesIterator{

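A short sketch of the new ValueType.NewChunk helper, which picks the matching empty chunk for a sample type (only APIs visible in this diff are used):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    func main() {
        // Ask for an empty chunk that can hold float samples.
        chk, err := chunkenc.ValFloat.NewChunk()
        if err != nil {
            panic(err)
        }
        app, err := chk.Appender()
        if err != nil {
            panic(err)
        }
        app.Append(1000, 42.0) // timestamp (ms), value

        fmt.Println(chk.NumSamples(), chk.Encoding()) // 1 XOR
    }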
@@ -117,11 +117,16 @@ func (b BlockChunkRef) Unpack() (int, int) {
 	return sgmIndex, chkStart
 }

-// Meta holds information about a chunk of data.
+// Meta holds information about one or more chunks.
+// For examples of when chunks.Meta could refer to multiple chunks, see
+// ChunkReader.ChunkOrIterable().
 type Meta struct {
 	// Ref and Chunk hold either a reference that can be used to retrieve
 	// chunk data or the data itself.
-	// If Chunk is nil, call ChunkReader.Chunk(Meta.Ref) to get the chunk and assign it to the Chunk field
+	// If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the
+	// chunk and assign it to the Chunk field. If an iterable is returned from
+	// that method, then it may not be possible to set Chunk as the iterable
+	// might form several chunks.
 	Ref   ChunkRef
 	Chunk chunkenc.Chunk

@@ -667,24 +672,24 @@ func (s *Reader) Size() int64 {
 }

 // Chunk returns a chunk from a given reference.
-func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) {
+func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
 	sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack()

 	if sgmIndex >= len(s.bs) {
-		return nil, fmt.Errorf("segment index %d out of range", sgmIndex)
+		return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex)
 	}

 	sgmBytes := s.bs[sgmIndex]

 	if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
-		return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
+		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
 	}
 	// With the minimum chunk length this should never cause us reading
 	// over the end of the slice.
 	c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
 	chkDataLen, n := binary.Uvarint(c)
 	if n <= 0 {
-		return nil, fmt.Errorf("reading chunk length failed with %d", n)
+		return nil, nil, fmt.Errorf("reading chunk length failed with %d", n)
 	}

 	chkEncStart := chkStart + n

@@ -693,17 +698,18 @@ func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) {
 	chkDataEnd := chkEnd - crc32.Size

 	if chkEnd > sgmBytes.Len() {
-		return nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
+		return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
 	}

 	sum := sgmBytes.Range(chkDataEnd, chkEnd)
 	if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
-		return nil, err
+		return nil, nil, err
 	}

 	chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
 	chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
-	return s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
+	chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
+	return chk, nil, err
 }

 func nextSequenceFile(dir string) (string, int, error) {

@@ -23,6 +23,6 @@ func TestReaderWithInvalidBuffer(t *testing.T) {
 	b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})
 	r := &Reader{bs: []ByteSlice{b}}

-	_, err := r.Chunk(Meta{Ref: 0})
+	_, _, err := r.ChunkOrIterable(Meta{Ref: 0})
 	require.Error(t, err)
 }

@@ -1348,6 +1348,46 @@ func BenchmarkCompactionFromHead(b *testing.B) {
 	}
 }

+func BenchmarkCompactionFromOOOHead(b *testing.B) {
+	dir := b.TempDir()
+	totalSeries := 100000
+	totalSamples := 100
+	for labelNames := 1; labelNames < totalSeries; labelNames *= 10 {
+		labelValues := totalSeries / labelNames
+		b.Run(fmt.Sprintf("labelnames=%d,labelvalues=%d", labelNames, labelValues), func(b *testing.B) {
+			chunkDir := b.TempDir()
+			opts := DefaultHeadOptions()
+			opts.ChunkRange = 1000
+			opts.ChunkDirRoot = chunkDir
+			opts.OutOfOrderTimeWindow.Store(int64(totalSamples))
+			h, err := NewHead(nil, nil, nil, nil, opts, nil)
+			require.NoError(b, err)
+			for ln := 0; ln < labelNames; ln++ {
+				app := h.Appender(context.Background())
+				for lv := 0; lv < labelValues; lv++ {
+					lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln))
+					_, err = app.Append(0, lbls, int64(totalSamples), 0)
+					require.NoError(b, err)
+					for ts := 0; ts < totalSamples; ts++ {
+						_, err = app.Append(0, lbls, int64(ts), float64(ts))
+						require.NoError(b, err)
+					}
+				}
+				require.NoError(b, app.Commit())
+			}
+
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				oooHead, err := NewOOOCompactionHead(context.TODO(), h)
+				require.NoError(b, err)
+				createBlockFromOOOHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), oooHead)
+			}
+			h.Close()
+		})
+	}
+}
+
 // TestDisableAutoCompactions checks that we can
 // disable and enable the auto compaction.
 // This is needed for unit tests that rely on

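One detail of the benchmark above worth calling out: the expensive head population happens before b.ResetTimer(), so only the compaction loop is measured. A minimal standalone sketch of that pattern (buildFixture and compact are hypothetical stand-ins):

    package example

    import "testing"

    func buildFixture() []int { return make([]int, 1<<20) } // stand-in for head setup

    func compact(xs []int) int { // stand-in for the work being benchmarked
        s := 0
        for _, x := range xs {
            s += x
        }
        return s
    }

    func BenchmarkCompact(b *testing.B) {
        fixture := buildFixture() // expensive setup, excluded from timing

        b.ResetTimer()   // discard setup cost from the measured window
        b.ReportAllocs() // report allocations per iteration, as the real benchmark does
        for i := 0; i < b.N; i++ {
            compact(fixture) // only this loop is timed
        }
    }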
@@ -2916,8 +2916,9 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {

 	for _, chks := range test.chks {
 		for _, chkExp := range chks {
-			chkAct, err := r.Chunk(chkExp)
+			chkAct, iterable, err := r.ChunkOrIterable(chkExp)
 			require.NoError(t, err)
+			require.Nil(t, iterable)
 			require.Equal(t, chkExp.Chunk.Bytes(), chkAct.Bytes())
 		}
 	}

@@ -2976,8 +2977,9 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
 		go func(chunk chunks.Meta) {
 			defer wg.Done()

-			chkAct, err := r.Chunk(chunk)
+			chkAct, iterable, err := r.ChunkOrIterable(chunk)
 			require.NoError(t, err)
+			require.Nil(t, iterable)
 			require.Equal(t, chunk.Chunk.Bytes(), chkAct.Bytes())
 		}(chk)
 	}

@@ -1927,6 +1927,8 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
 		deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe)
 		s.locks[i].Lock()

+		// Delete conflicts first so seriesHashmap.del doesn't move them to the
+		// `unique` field, after deleting `unique`.
 		for hash, all := range s.hashes[i].conflicts {
 			for _, series := range all {
 				check(i, hash, series, deletedForCallback)

@@ -318,10 +318,10 @@ func (h *headChunkReader) Close() error {
 	return nil
 }

-// Chunk returns the chunk for the reference number.
-func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+// ChunkOrIterable returns the chunk for the reference number.
+func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
 	chk, _, err := h.chunk(meta, false)
-	return chk, err
+	return chk, nil, err
 }

 // ChunkWithCopy returns the chunk for the reference number.

@@ -445,13 +445,13 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper, memChunkPo
 	return elem, true, offset == 0, nil
 }

-// oooMergedChunk returns the requested chunk based on the given chunks.Meta
-// reference from memory or by m-mapping it from the disk. The returned chunk
-// might be a merge of all the overlapping chunks, if any, amongst all the
-// chunks in the OOOHead.
+// oooMergedChunks returns an iterable over one or more OOO chunks for the given
+// chunks.Meta reference from memory or by m-mapping it from the disk. The
+// returned iterable will be a merge of all the overlapping chunks, if any,
+// amongst all the chunks in the OOOHead.
 // This function is not thread safe unless the caller holds a lock.
 // The caller must ensure that s.ooo is not nil.
-func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) {
+func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm chunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
 	_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()

 	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are

@@ -528,11 +528,13 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
 	mc := &mergedOOOChunks{}
 	absoluteMax := int64(math.MinInt64)
 	for _, c := range tmpChks {
-		if c.meta.Ref != meta.Ref && (len(mc.chunks) == 0 || c.meta.MinTime > absoluteMax) {
+		if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) {
 			continue
 		}
+		var iterable chunkenc.Iterable
 		if c.meta.Ref == oooHeadRef {
 			var xor *chunkenc.XORChunk
 			var err error
 			// If head chunk min and max time match the meta OOO markers
 			// that means that the chunk has not expanded so we can append
 			// it as it is.

@@ -545,7 +547,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
 			if err != nil {
 				return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk")
 			}
-			c.meta.Chunk = xor
+			iterable = xor
 		} else {
 			chk, err := cdm.Chunk(c.ref)
 			if err != nil {

@@ -560,12 +562,12 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
 				// wrap the chunk within a chunk that doesn't allow us to iterate
 				// through samples out of the OOOLastMinT and OOOLastMaxT
 				// markers.
-				c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
+				iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
 			} else {
-				c.meta.Chunk = chk
+				iterable = chk
 			}
 		}
-		mc.chunks = append(mc.chunks, c.meta)
+		mc.chunkIterables = append(mc.chunkIterables, iterable)
 		if c.meta.MaxTime > absoluteMax {
 			absoluteMax = c.meta.MaxTime
 		}

@@ -574,77 +576,30 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint,
 	return mc, nil
 }

-var _ chunkenc.Chunk = &mergedOOOChunks{}
+var _ chunkenc.Iterable = &mergedOOOChunks{}

-// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies
-// chunkenc.Chunk.
+// mergedOOOChunks holds the list of iterables for overlapping chunks.
 type mergedOOOChunks struct {
-	chunks []chunks.Meta
-}
-
-// Bytes is a very expensive method because its calling the iterator of all the
-// chunks in the mergedOOOChunk and building a new chunk with the samples.
-func (o mergedOOOChunks) Bytes() []byte {
-	xc := chunkenc.NewXORChunk()
-	app, err := xc.Appender()
-	if err != nil {
-		panic(err)
-	}
-	it := o.Iterator(nil)
-	for it.Next() == chunkenc.ValFloat {
-		t, v := it.At()
-		app.Append(t, v)
-	}
-
-	return xc.Bytes()
-}
-
-func (o mergedOOOChunks) Encoding() chunkenc.Encoding {
-	return chunkenc.EncXOR
-}
-
-func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) {
-	return nil, errors.New("can't append to mergedOOOChunks")
+	chunkIterables []chunkenc.Iterable
 }

 func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
-	return storage.ChainSampleIteratorFromMetas(iterator, o.chunks)
+	return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
 }

-func (o mergedOOOChunks) NumSamples() int {
-	samples := 0
-	for _, c := range o.chunks {
-		samples += c.Chunk.NumSamples()
-	}
-	return samples
-}
-
-func (o mergedOOOChunks) Compact() {}
-
-var _ chunkenc.Chunk = &boundedChunk{}
+var _ chunkenc.Iterable = &boundedIterable{}

-// boundedChunk is an implementation of chunkenc.Chunk that uses a
+// boundedIterable is an implementation of chunkenc.Iterable that uses a
 // boundedIterator that only iterates through samples whose timestamps are
 // >= minT and <= maxT.
-type boundedChunk struct {
-	chunkenc.Chunk
-	minT int64
-	maxT int64
+type boundedIterable struct {
+	chunk chunkenc.Chunk
+	minT  int64
+	maxT  int64
 }

-func (b boundedChunk) Bytes() []byte {
-	xor := chunkenc.NewXORChunk()
-	a, _ := xor.Appender()
-	it := b.Iterator(nil)
-	for it.Next() == chunkenc.ValFloat {
-		t, v := it.At()
-		a.Append(t, v)
-	}
-	return xor.Bytes()
-}
-
-func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
-	it := b.Chunk.Iterator(iterator)
+func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
+	it := b.chunk.Iterator(iterator)
 	if it == nil {
 		panic("iterator shouldn't be nil")
 	}

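The `var _ chunkenc.Iterable = &mergedOOOChunks{}` lines above are compile-time assertions: assigning to the blank identifier forces a build error if the type ever stops satisfying the interface. The same idiom in a self-contained form:

    package main

    type Iterable interface {
        Iterate() []int
    }

    type mergedChunks struct{ data []int }

    func (m *mergedChunks) Iterate() []int { return m.data }

    // Compile-time proof that *mergedChunks satisfies Iterable; if the method
    // set drifts, the build breaks here instead of at a distant call site.
    var _ Iterable = &mergedChunks{}

    func main() {}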
@@ -129,21 +129,10 @@ func TestBoundedChunk(t *testing.T) {
 	}
 	for _, tc := range tests {
 		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
-			chunk := boundedChunk{tc.inputChunk, tc.inputMinT, tc.inputMaxT}
-
-			// Testing Bytes()
-			expChunk := chunkenc.NewXORChunk()
-			if tc.inputChunk.NumSamples() > 0 {
-				app, err := expChunk.Appender()
-				require.NoError(t, err)
-				for ts := tc.inputMinT; ts <= tc.inputMaxT; ts++ {
-					app.Append(ts, float64(ts))
-				}
-			}
-			require.Equal(t, expChunk.Bytes(), chunk.Bytes())
+			iterable := boundedIterable{tc.inputChunk, tc.inputMinT, tc.inputMaxT}

 			var samples []sample
-			it := chunk.Iterator(nil)
+			it := iterable.Iterator(nil)

 			if tc.initialSeek != 0 {
 				// Testing Seek()

@@ -1840,16 +1840,16 @@ func TestGCChunkAccess(t *testing.T) {

 	cr, err := h.chunksRange(0, 1500, nil)
 	require.NoError(t, err)
-	_, err = cr.Chunk(chunks[0])
+	_, _, err = cr.ChunkOrIterable(chunks[0])
 	require.NoError(t, err)
-	_, err = cr.Chunk(chunks[1])
+	_, _, err = cr.ChunkOrIterable(chunks[1])
 	require.NoError(t, err)

 	require.NoError(t, h.Truncate(1500)) // Remove a chunk.

-	_, err = cr.Chunk(chunks[0])
+	_, _, err = cr.ChunkOrIterable(chunks[0])
 	require.Equal(t, storage.ErrNotFound, err)
-	_, err = cr.Chunk(chunks[1])
+	_, _, err = cr.ChunkOrIterable(chunks[1])
 	require.NoError(t, err)
 }

@@ -1899,18 +1899,18 @@ func TestGCSeriesAccess(t *testing.T) {

 	cr, err := h.chunksRange(0, 2000, nil)
 	require.NoError(t, err)
-	_, err = cr.Chunk(chunks[0])
+	_, _, err = cr.ChunkOrIterable(chunks[0])
 	require.NoError(t, err)
-	_, err = cr.Chunk(chunks[1])
+	_, _, err = cr.ChunkOrIterable(chunks[1])
 	require.NoError(t, err)

 	require.NoError(t, h.Truncate(2000)) // Remove the series.

 	require.Equal(t, (*memSeries)(nil), h.series.getByID(1))

-	_, err = cr.Chunk(chunks[0])
+	_, _, err = cr.ChunkOrIterable(chunks[0])
 	require.Equal(t, storage.ErrNotFound, err)
-	_, err = cr.Chunk(chunks[1])
+	_, _, err = cr.ChunkOrIterable(chunks[1])
 	require.Equal(t, storage.ErrNotFound, err)
 }

@@ -5475,8 +5475,9 @@ func TestCuttingNewHeadChunks(t *testing.T) {
 			require.Len(t, chkMetas, len(tc.expectedChks))

 			for i, expected := range tc.expectedChks {
-				chk, err := chkReader.Chunk(chkMetas[i])
+				chk, iterable, err := chkReader.ChunkOrIterable(chkMetas[i])
 				require.NoError(t, err)
+				require.Nil(t, iterable)

 				require.Equal(t, expected.numSamples, chk.NumSamples())
 				require.Len(t, chk.Bytes(), expected.numBytes)

@@ -5524,8 +5525,9 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) {

 	storedSampleCount := 0
 	for _, chunkMeta := range chunks {
-		chunk, err := chunkReader.Chunk(chunkMeta)
+		chunk, iterable, err := chunkReader.ChunkOrIterable(chunkMeta)
 		require.NoError(t, err)
+		require.Nil(t, iterable)
 		storedSampleCount += chunk.NumSamples()
 	}

@@ -5630,7 +5632,10 @@ func labelsWithHashCollision() (labels.Labels, labels.Labels) {
 	return ls1, ls2
 }

-func TestStripeSeries_getOrSet(t *testing.T) {
+// stripeSeriesWithCollidingSeries returns a stripeSeries with two memSeries having the same, colliding, hash.
+func stripeSeriesWithCollidingSeries(t *testing.T) (*stripeSeries, *memSeries, *memSeries) {
+	t.Helper()
+
 	lbls1, lbls2 := labelsWithHashCollision()
 	ms1 := memSeries{
 		lset: lbls1,

@@ -5656,11 +5661,31 @@ func TestStripeSeries_getOrSet(t *testing.T) {
 	require.True(t, created)
 	require.Same(t, &ms2, got)

+	return s, &ms1, &ms2
+}
+
+func TestStripeSeries_getOrSet(t *testing.T) {
+	s, ms1, ms2 := stripeSeriesWithCollidingSeries(t)
+	hash := ms1.lset.Hash()
+
 	// Verify that we can get both of the series despite the hash collision
-	got = s.getByHash(hash, lbls1)
-	require.Same(t, &ms1, got)
-	got = s.getByHash(hash, lbls2)
-	require.Same(t, &ms2, got)
+	got := s.getByHash(hash, ms1.lset)
+	require.Same(t, ms1, got)
+	got = s.getByHash(hash, ms2.lset)
+	require.Same(t, ms2, got)
+}
+
+func TestStripeSeries_gc(t *testing.T) {
+	s, ms1, ms2 := stripeSeriesWithCollidingSeries(t)
+	hash := ms1.lset.Hash()
+
+	s.gc(0, 0)
+
+	// Verify that we can get neither ms1 nor ms2 after gc-ing corresponding series
+	got := s.getByHash(hash, ms1.lset)
+	require.Nil(t, got)
+	got = s.getByHash(hash, ms2.lset)
+	require.Nil(t, got)
+}

 func TestSecondaryHashFunction(t *testing.T) {

@@ -253,33 +253,33 @@ func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationS
 	}
 }

-func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
 	sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack()

 	s := cr.head.series.getByID(sid)
 	// This means that the series has been garbage collected.
 	if s == nil {
-		return nil, storage.ErrNotFound
+		return nil, nil, storage.ErrNotFound
 	}

 	s.Lock()
 	if s.ooo == nil {
 		// There is no OOO data for this series.
 		s.Unlock()
-		return nil, storage.ErrNotFound
+		return nil, nil, storage.ErrNotFound
 	}
-	c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
+	mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt)
 	s.Unlock()
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}

 	// This means that the query range did not overlap with the requested chunk.
-	if len(c.chunks) == 0 {
-		return nil, storage.ErrNotFound
+	if len(mc.chunkIterables) == 0 {
+		return nil, nil, storage.ErrNotFound
 	}

-	return c, nil
+	return nil, mc, nil
 }

 func (cr OOOHeadChunkReader) Close() error {

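Note the final return: on success the OOO head reader now hands back the merged samples only as an iterable, with a nil chunk, and the updated tests below assert exactly that. A consumption sketch of this contract (a fragment assuming cr, meta, and t from a test like those below; not new API):

    c, iterable, err := cr.ChunkOrIterable(meta)
    require.NoError(t, err)
    require.Nil(t, c) // the OOO reader never returns a ready-made chunk

    it := iterable.Iterator(nil)
    for it.Next() == chunkenc.ValFloat {
        ts, v := it.At() // merged out-of-order samples, in timestamp order
        _, _ = ts, v
    }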
@@ -486,9 +486,10 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {

 		cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil)
 		defer cr.Close()
-		c, err := cr.Chunk(chunks.Meta{
+		c, iterable, err := cr.ChunkOrIterable(chunks.Meta{
 			Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
 		})
+		require.Nil(t, iterable)
 		require.Equal(t, err, fmt.Errorf("not found"))
 		require.Equal(t, c, nil)
 	})

@@ -853,11 +854,12 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
 			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
 			defer cr.Close()
 			for i := 0; i < len(chks); i++ {
-				c, err := cr.Chunk(chks[i])
+				c, iterable, err := cr.ChunkOrIterable(chks[i])
 				require.NoError(t, err)
+				require.Nil(t, c)

 				var resultSamples chunks.SampleSlice
-				it := c.Iterator(nil)
+				it := iterable.Iterator(nil)
 				for it.Next() == chunkenc.ValFloat {
 					t, v := it.At()
 					resultSamples = append(resultSamples, sample{t: t, f: v})

@@ -1025,11 +1027,12 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil)
 			defer cr.Close()
 			for i := 0; i < len(chks); i++ {
-				c, err := cr.Chunk(chks[i])
+				c, iterable, err := cr.ChunkOrIterable(chks[i])
 				require.NoError(t, err)
+				require.Nil(t, c)

 				var resultSamples chunks.SampleSlice
-				it := c.Iterator(nil)
+				it := iterable.Iterator(nil)
 				for it.Next() == chunkenc.ValFloat {
 					ts, v := it.At()
 					resultSamples = append(resultSamples, sample{t: ts, f: v})

tsdb/querier.go (247 changes)
@@ -685,36 +685,42 @@ func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
 // populateWithDelGenericSeriesIterator assumes that chunks that would be fully
 // removed by intervals are filtered out in previous phase.
 //
-// On each iteration currChkMeta is available. If currDelIter is not nil, it
-// means that the chunk iterator in currChkMeta is invalid and a chunk rewrite
-// is needed, for which currDelIter should be used.
+// On each iteration currMeta is available. If currDelIter is not nil, it
+// means that the chunk in currMeta is invalid and a chunk rewrite is needed,
+// for which currDelIter should be used.
 type populateWithDelGenericSeriesIterator struct {
 	blockID ulid.ULID
-	chunks  ChunkReader
-	// chks are expected to be sorted by minTime and should be related to
+	cr      ChunkReader
+	// metas are expected to be sorted by minTime and should be related to
 	// the same, single series.
-	chks []chunks.Meta
+	// It's possible for a single chunks.Meta to refer to multiple chunks.
+	// cr.ChunkOrIterable() would return an iterable and a nil chunk in this
+	// case.
+	metas []chunks.Meta

-	i         int // Index into chks; -1 if not started yet.
+	i         int // Index into metas; -1 if not started yet.
 	err       error
 	bufIter   DeletedIterator // Retained for memory re-use. currDelIter may point here.
 	intervals tombstones.Intervals

 	currDelIter chunkenc.Iterator
-	currChkMeta chunks.Meta
+	// currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to
+	// the chunk returned from cr.ChunkOrIterable(). As that can return a nil
+	// chunk, currMeta.Chunk is not always guaranteed to be set.
+	currMeta chunks.Meta
 }

 func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
 	p.blockID = blockID
-	p.chunks = cr
-	p.chks = chks
+	p.cr = cr
+	p.metas = chks
 	p.i = -1
 	p.err = nil
 	// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
 	p.bufIter.Intervals = p.bufIter.Intervals[:0]
 	p.intervals = intervals
 	p.currDelIter = nil
-	p.currChkMeta = chunks.Meta{}
+	p.currMeta = chunks.Meta{}
 }

 // If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)

@@ -722,43 +728,54 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
 // However, if the deletion intervals overlaps with the head chunk, then the head chunk is
 // not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
 func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
-	if p.err != nil || p.i >= len(p.chks)-1 {
+	if p.err != nil || p.i >= len(p.metas)-1 {
 		return false
 	}

 	p.i++
-	p.currChkMeta = p.chks[p.i]
+	p.currMeta = p.metas[p.i]

 	p.bufIter.Intervals = p.bufIter.Intervals[:0]
 	for _, interval := range p.intervals {
-		if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
+		if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
 			p.bufIter.Intervals = p.bufIter.Intervals.Add(interval)
 		}
 	}

-	hcr, ok := p.chunks.(*headChunkReader)
+	hcr, ok := p.cr.(*headChunkReader)
+	var iterable chunkenc.Iterable
 	if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
 		// ChunkWithCopy will copy the head chunk.
 		var maxt int64
-		p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta)
+		p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta)
 		// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
-		p.currChkMeta.MaxTime = maxt
+		p.currMeta.MaxTime = maxt
 	} else {
-		p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
+		p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
 	}

 	if p.err != nil {
-		p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
+		p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String())
 		return false
 	}

-	if len(p.bufIter.Intervals) == 0 {
-		// If there is no overlap with deletion intervals, we can take chunk as it is.
-		p.currDelIter = nil
+	// Use the single chunk if possible.
+	if p.currMeta.Chunk != nil {
+		if len(p.bufIter.Intervals) == 0 {
+			// If there is no overlap with deletion intervals and a single chunk is
+			// returned, we can take chunk as it is.
+			p.currDelIter = nil
+			return true
+		}
+		// Otherwise we need to iterate over the samples in the single chunk
+		// and create new chunks.
+		p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter)
+		p.currDelIter = &p.bufIter
 		return true
 	}

-	// We don't want the full chunk, take just a part of it.
-	p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter)
+	// Otherwise, use the iterable to create an iterator.
+	p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter)
 	p.currDelIter = &p.bufIter
 	return true
 }

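The control flow in next() now has three outcomes. A condensed sketch of the decision (a paraphrase of the code above, compiled into a tiny standalone function; chunkNonNil and deletions are hypothetical stand-ins for p.currMeta.Chunk != nil and len(p.bufIter.Intervals)):

    // decide mirrors the branch structure of next() after ChunkOrIterable
    // (or ChunkWithCopy for head chunks) has run.
    func decide(chunkNonNil bool, deletions int) string {
        switch {
        case chunkNonNil && deletions == 0:
            return "use the single chunk as-is"
        case chunkNonNil:
            return "re-iterate the chunk through a DeletedIterator to skip tombstoned samples"
        default:
            return "iterate the iterable; its samples may be re-cut into several chunks"
        }
    }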
@@ -822,7 +839,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
 		if p.currDelIter != nil {
 			p.curr = p.currDelIter
 		} else {
-			p.curr = p.currChkMeta.Chunk.Iterator(p.curr)
+			p.curr = p.currMeta.Chunk.Iterator(p.curr)
 		}
 		if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
 			return valueType

@ -874,22 +891,61 @@ func (p *populateWithDelSeriesIterator) Err() error {
|
|||
type populateWithDelChunkSeriesIterator struct {
populateWithDelGenericSeriesIterator

curr chunks.Meta
// currMetaWithChunk is the current meta with its chunk field set. This meta
// is guaranteed to map to a single chunk. This differs from
// populateWithDelGenericSeriesIterator.currMeta as that
// could refer to multiple chunks.
currMetaWithChunk chunks.Meta

// chunksFromIterable stores the chunks created from iterating through
// the iterable returned by cr.ChunkOrIterable() (with deleted samples
// removed).
chunksFromIterable []chunks.Meta
chunksFromIterableIdx int
}

func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
p.curr = chunks.Meta{}
p.currMetaWithChunk = chunks.Meta{}
p.chunksFromIterable = p.chunksFromIterable[:0]
p.chunksFromIterableIdx = -1
}

func (p *populateWithDelChunkSeriesIterator) Next() bool {
if p.currMeta.Chunk == nil {
// If we've been creating chunks from the iterable, check if there are
// any more chunks to iterate through.
if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 {
p.chunksFromIterableIdx++
p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx]
return true
}
}

// Move to the next chunk/deletion iterator.
if !p.next(true) {
return false
}
p.curr = p.currChkMeta
if p.currDelIter == nil {
return true

if p.currMeta.Chunk != nil {
if p.currDelIter == nil {
p.currMetaWithChunk = p.currMeta
return true
}
// If ChunkOrIterable() returned a non-nil chunk, the samples in
// p.currDelIter will only form one chunk, as the only change
// p.currDelIter might make is deleting some samples.
return p.populateCurrForSingleChunk()
}

// If ChunkOrIterable() returned an iterable, multiple chunks may be
// created from the samples in p.currDelIter.
return p.populateChunksFromIterable()
}
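The effect of the branching above is a two-level loop: the outer p.next() call advances through the series' metas, while chunksFromIterable replays any chunks that were rebuilt from an iterable. From the caller's side nothing changes; the chunk iterator is drained the same way as before (a sketch, with it standing in for a populateWithDelChunkSeriesIterator):

for it.Next() {
	meta := it.At()
	// meta.Chunk is now guaranteed to be a single, fully populated chunk,
	// even when the reader originally handed back an iterable.
	fmt.Printf("chunk %v spans [%d, %d]\n", meta.Ref, meta.MinTime, meta.MaxTime)
}
if err := it.Err(); err != nil {
	// handle the error
}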
// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This
// should be called if the samples in p.currDelIter only form one chunk.
func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
valueType := p.currDelIter.Next()
if valueType == chunkenc.ValNone {
if err := p.currDelIter.Err(); err != nil {

@ -897,9 +953,9 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {

}
return false
}
p.curr.MinTime = p.currDelIter.AtT()
p.currMetaWithChunk.MinTime = p.currDelIter.AtT()

// Re-encode the chunk if iterator is provider. This means that it has
// Re-encode the chunk if iterator is provided. This means that it has
// some samples to be deleted or chunk is opened.
var (
newChunk chunkenc.Chunk

@ -957,7 +1013,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
}
}
default:
err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType)
err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType)
}

if err != nil {

@ -969,12 +1025,127 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {

return false
}

p.curr.Chunk = newChunk
p.curr.MaxTime = t
p.currMetaWithChunk.Chunk = newChunk
p.currMetaWithChunk.MaxTime = t
return true
}

func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.curr }
// populateChunksFromIterable reads the samples from currDelIter to create
// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first
// chunk.
func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
p.chunksFromIterable = p.chunksFromIterable[:0]
p.chunksFromIterableIdx = -1

firstValueType := p.currDelIter.Next()
if firstValueType == chunkenc.ValNone {
if err := p.currDelIter.Err(); err != nil {
p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read")
return false
}
return false
}

var (
// t is the timestamp for the current sample.
t int64
cmint int64
cmaxt int64

currentChunk chunkenc.Chunk

app chunkenc.Appender

newChunk chunkenc.Chunk
recoded bool

err error
)

prevValueType := chunkenc.ValNone

for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() {
// Check if the encoding has changed (i.e. we need to create a new
// chunk as chunks can't have multiple encoding types).
// For the first sample, the following condition will always be true as
// ValNone != ValFloat | ValHistogram | ValFloatHistogram.
if currentValueType != prevValueType {
if prevValueType != chunkenc.ValNone {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
}
cmint = p.currDelIter.AtT()
if currentChunk, err = currentValueType.NewChunk(); err != nil {
break
}
if app, err = currentChunk.Appender(); err != nil {
break
}
}

switch currentValueType {
case chunkenc.ValFloat:
{
var v float64
t, v = p.currDelIter.At()
app.Append(t, v)
}
case chunkenc.ValHistogram:
{
var v *histogram.Histogram
t, v = p.currDelIter.AtHistogram()
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false)
}
case chunkenc.ValFloatHistogram:
{
var v *histogram.FloatHistogram
t, v = p.currDelIter.AtFloatHistogram()
// No need to set prevApp as AppendHistogram will set the
// counter reset header for the appender that's returned.
newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false)
}
}

if err != nil {
break
}

if newChunk != nil {
if !recoded {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
}
currentChunk = newChunk
cmint = t
}

cmaxt = t
prevValueType = currentValueType
}

if err != nil {
p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks")
return false
}
if err = p.currDelIter.Err(); err != nil {
p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks")
return false
}

if prevValueType != chunkenc.ValNone {
p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
}

if len(p.chunksFromIterable) == 0 {
return false
}

p.currMetaWithChunk = p.chunksFromIterable[0]
p.chunksFromIterableIdx = 0
return true
}

func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.currMetaWithChunk }
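The AppendHistogram/AppendFloatHistogram calls above drive the chunk cutting: they return a non-nil chunk when the appender had to start a new one (for example on a counter reset), plus a recoded flag telling the caller whether the previous chunk was rewritten in place rather than completed. A condensed sketch of that pattern in isolation (helper name and inputs are ours, assuming the chunkenc and model/histogram packages):

func appendHistograms(ts []int64, hs []*histogram.Histogram) ([]chunkenc.Chunk, error) {
	var out []chunkenc.Chunk
	cur := chunkenc.NewHistogramChunk()
	app, err := cur.Appender()
	if err != nil {
		return nil, err
	}
	for i, h := range hs {
		newChk, recoded, newApp, err := app.AppendHistogram(nil, ts[i], h, false)
		if err != nil {
			return nil, err
		}
		if newChk != nil {
			if !recoded {
				out = append(out, cur) // the previous chunk is complete
			}
			cur = newChk // continue in the freshly cut (or recoded) chunk
		}
		app = newApp
	}
	return append(out, cur), nil
}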
// blockSeriesSet allows to iterate over sorted, populated series with applied tombstones.
// Series with all deleted chunks are still present as Series with no samples.

@ -1174,8 +1345,8 @@ func newNopChunkReader() ChunkReader {

}
}

func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) {
return cr.emptyChunk, nil
func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
return cr.emptyChunk, nil, nil
}

func (cr nopChunkReader) Close() error { return nil }
@ -725,12 +725,14 @@ func TestBlockQuerierDelete(t *testing.T) {

type fakeChunksReader struct {
ChunkReader
chks map[chunks.ChunkRef]chunkenc.Chunk
chks      map[chunks.ChunkRef]chunkenc.Chunk
iterables map[chunks.ChunkRef]chunkenc.Iterable
}

func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
f := &fakeChunksReader{
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
chks:      map[chunks.ChunkRef]chunkenc.Chunk{},
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
}
chks := make([]chunks.Meta, 0, len(s))

@ -747,21 +749,102 @@ func createFakeReaderAndNotPopulatedChunks(s ...[]chunks.Sample) (*fakeChunksRea
return f, chks
}

func (r *fakeChunksReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
chk, ok := r.chks[meta.Ref]
if !ok {
return nil, fmt.Errorf("chunk not found at ref %v", meta.Ref)
// Samples in each slice are assumed to be sorted.
func createFakeReaderAndIterables(s ...[]chunks.Sample) (*fakeChunksReader, []chunks.Meta) {
f := &fakeChunksReader{
chks: map[chunks.ChunkRef]chunkenc.Chunk{},
iterables: map[chunks.ChunkRef]chunkenc.Iterable{},
}
return chk, nil
chks := make([]chunks.Meta, 0, len(s))

for ref, samples := range s {
f.iterables[chunks.ChunkRef(ref)] = &mockIterable{s: samples}

var minTime, maxTime int64
if len(samples) > 0 {
minTime = samples[0].T()
maxTime = samples[len(samples)-1].T()
}
chks = append(chks, chunks.Meta{
Ref: chunks.ChunkRef(ref),
MinTime: minTime,
MaxTime: maxTime,
})
}
return f, chks
}

func (r *fakeChunksReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
if chk, ok := r.chks[meta.Ref]; ok {
return chk, nil, nil
}

if it, ok := r.iterables[meta.Ref]; ok {
return nil, it, nil
}
return nil, nil, fmt.Errorf("chunk or iterable not found at ref %v", meta.Ref)
}

type mockIterable struct {
s []chunks.Sample
}

func (it *mockIterable) Iterator(chunkenc.Iterator) chunkenc.Iterator {
return &mockSampleIterator{
s: it.s,
idx: -1,
}
}

type mockSampleIterator struct {
s []chunks.Sample
idx int
}

func (it *mockSampleIterator) Seek(t int64) chunkenc.ValueType {
for ; it.idx < len(it.s); it.idx++ {
if it.idx != -1 && it.s[it.idx].T() >= t {
return it.s[it.idx].Type()
}
}

return chunkenc.ValNone
}

func (it *mockSampleIterator) At() (int64, float64) {
return it.s[it.idx].T(), it.s[it.idx].F()
}

func (it *mockSampleIterator) AtHistogram() (int64, *histogram.Histogram) {
return it.s[it.idx].T(), it.s[it.idx].H()
}

func (it *mockSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
return it.s[it.idx].T(), it.s[it.idx].FH()
}

func (it *mockSampleIterator) AtT() int64 {
return it.s[it.idx].T()
}

func (it *mockSampleIterator) Next() chunkenc.ValueType {
if it.idx < len(it.s)-1 {
it.idx++
return it.s[it.idx].Type()
}

return chunkenc.ValNone
}

func (it *mockSampleIterator) Err() error { return nil }
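Together these mocks let a test serve samples through the iterable path without building real chunks. A sketch of wiring them up by hand, mirroring what createFakeReaderAndIterables does per ref (the sample literal follows the test file's sample type):

f := &fakeChunksReader{
	iterables: map[chunks.ChunkRef]chunkenc.Iterable{
		0: &mockIterable{s: []chunks.Sample{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}}},
	},
}
chk, iterable, err := f.ChunkOrIterable(chunks.Meta{Ref: 0, MinTime: 1, MaxTime: 2})
if err == nil && chk == nil && iterable != nil {
	// The reader answered via the iterable path, so the caller has to
	// re-encode samples into chunks itself.
}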
func TestPopulateWithTombSeriesIterators(t *testing.T) {
type minMaxTimes struct {
minTime, maxTime int64
}
cases := []struct {
name string
chks [][]chunks.Sample
name    string
samples [][]chunks.Sample

expected []chunks.Sample
expectedChks []chunks.Meta

@ -772,23 +855,38 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
// Seek being zero means do not test seek.
seek int64
seekSuccess bool

// Set this to true if a sample slice will form multiple chunks.
skipChunkTest bool

skipIterableTest bool
}{
{
name: "no chunk",
chks: [][]chunks.Sample{},
name: "no chunk",
samples: [][]chunks.Sample{},
},
{
name: "one empty chunk", // This should never happen.
chks: [][]chunks.Sample{{}},
name: "one empty chunk", // This should never happen.
samples: [][]chunks.Sample{{}},

expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{}),
},
expectedMinMaxTimes: []minMaxTimes{{0, 0}},
// iterables with no samples will return no chunks instead of empty chunks
skipIterableTest: true,
},
{
name: "three empty chunks", // This should never happen.
chks: [][]chunks.Sample{{}, {}, {}},
name: "one empty iterable",
samples: [][]chunks.Sample{{}},

// iterables with no samples will return no chunks
expectedChks: nil,
skipChunkTest: true,
},
{
name: "three empty chunks", // This should never happen.
samples: [][]chunks.Sample{{}, {}, {}},

expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{}),

@ -796,10 +894,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {
assureChunkFromSamples(t, []chunks.Sample{}),
},
expectedMinMaxTimes: []minMaxTimes{{0, 0}, {0, 0}, {0, 0}},
// iterables with no samples will return no chunks instead of empty chunks
skipIterableTest: true,
},
{
name: "three empty iterables",
samples: [][]chunks.Sample{{}, {}, {}},

// iterables with no samples will return no chunks
expectedChks: nil,
skipChunkTest: true,
},
{
name: "one chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
},

@ -815,7 +923,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "two full chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -835,7 +943,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "three full chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
{sample{10, 22, nil, nil}, sample{203, 3493, nil, nil}},

@ -859,15 +967,15 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
// Seek cases.
{
name: "three empty chunks and seek", // This should never happen.
chks: [][]chunks.Sample{{}, {}, {}},
seek: 1,
name: "three empty chunks and seek", // This should never happen.
samples: [][]chunks.Sample{{}, {}, {}},
seek: 1,

seekSuccess: false,
},
{
name: "two chunks and seek beyond chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -877,7 +985,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "two chunks and seek on middle of first chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -890,7 +998,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "two chunks and seek before first chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -904,12 +1012,12 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

// Deletion / Trim cases.
{
name: "no chunk with deletion interval",
chks: [][]chunks.Sample{},
samples: [][]chunks.Sample{},
intervals: tombstones.Intervals{{Mint: 20, Maxt: 21}},
},
{
name: "two chunks with trimmed first and last samples from edge chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -930,7 +1038,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "two chunks with trimmed middle sample of first chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -951,7 +1059,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "two chunks with deletion across two chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -973,7 +1081,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

// Deletion with seek.
{
name: "two chunks with trimmed first and last samples from edge chunks, seek from middle of first chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},

@ -985,9 +1093,20 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

sample{3, 5, nil, nil}, sample{6, 1, nil, nil}, sample{7, 89, nil, nil},
},
},
{
name: "one chunk where all samples are trimmed",
samples: [][]chunks.Sample{
{sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{sample{7, 89, nil, nil}, sample{9, 8, nil, nil}},
},
intervals: tombstones.Intervals{{Mint: math.MinInt64, Maxt: 3}}.Add(tombstones.Interval{Mint: 4, Maxt: math.MaxInt64}),

expected: nil,
expectedChks: nil,
},
{
name: "one histogram chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},

@ -1013,7 +1132,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},

@ -1036,7 +1155,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestHistogram(2), nil},

@ -1061,7 +1180,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one float histogram chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},

@ -1087,7 +1206,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one float histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},

@ -1110,7 +1229,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one float histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestFloatHistogram(2)},

@ -1135,7 +1254,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge histogram chunk",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},

@ -1161,7 +1280,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},

@ -1184,7 +1303,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, tsdbutil.GenerateTestGaugeHistogram(1), nil},
sample{2, 0, tsdbutil.GenerateTestGaugeHistogram(2), nil},

@ -1209,7 +1328,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge float histogram",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},

@ -1235,7 +1354,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge float histogram chunk intersect with earlier deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},

@ -1258,7 +1377,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "one gauge float histogram chunk intersect with later deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{1, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(1)},
sample{2, 0, nil, tsdbutil.GenerateTestGaugeFloatHistogram(2)},

@ -1283,7 +1402,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "three full mixed chunks",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{sample{1, 2, nil, nil}, sample{2, 3, nil, nil}, sample{3, 5, nil, nil}, sample{6, 1, nil, nil}},
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},

@ -1315,7 +1434,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "three full mixed chunks in different order",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},

@ -1347,7 +1466,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "three full mixed chunks in different order intersect with deletion interval",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{9, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},

@ -1378,7 +1497,7 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
{
name: "three full mixed chunks overlapping",
chks: [][]chunks.Sample{
samples: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestGaugeHistogram(89), nil},
sample{12, 0, tsdbutil.GenerateTestGaugeHistogram(8), nil},

@ -1408,11 +1527,237 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

},
expectedMinMaxTimes: []minMaxTimes{{7, 12}, {11, 16}, {10, 203}},
},
{
// This case won't actually happen until OOO native histograms is implemented.
// Issue: https://github.com/prometheus/prometheus/issues/11220.
name: "int histogram iterables with counter resets",
samples: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
// Counter reset should be detected when chunks are created from the iterable.
sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
// Counter reset should be detected when chunks are created from the iterable.
sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
},
{
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
// Counter reset should be detected when chunks are created from the iterable.
sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
},

expected: []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
sample{12, 0, tsdbutil.GenerateTestHistogram(5), nil},
sample{15, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{16, 0, tsdbutil.GenerateTestHistogram(7), nil},
sample{17, 0, tsdbutil.GenerateTestHistogram(5), nil},
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{19, 0, tsdbutil.GenerateTestHistogram(7), nil},
sample{20, 0, tsdbutil.GenerateTestHistogram(5), nil},
sample{21, 0, tsdbutil.GenerateTestHistogram(6), nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(9)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{12, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
sample{15, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
sample{16, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{17, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{19, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{20, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(5)), nil},
sample{21, 0, tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(6)), nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{
{7, 8},
{12, 16},
{17, 17},
{18, 19},
{20, 21},
},

// Skipping chunk test - can't create a single chunk for each
// sample slice since there are counter resets in the middle of
// the slices.
skipChunkTest: true,
},
{
// This case won't actually happen until OOO native histograms is implemented.
// Issue: https://github.com/prometheus/prometheus/issues/11220.
name: "float histogram iterables with counter resets",
samples: [][]chunks.Sample{
{
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
// Counter reset should be detected when chunks are created from the iterable.
sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
// Counter reset should be detected when chunks are created from the iterable.
sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
},
{
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
// Counter reset should be detected when chunks are created from the iterable.
sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
},

expected: []chunks.Sample{
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
sample{8, 0, nil, tsdbutil.GenerateTestFloatHistogram(9)},
sample{12, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
sample{15, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
sample{16, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
sample{17, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
sample{19, 0, nil, tsdbutil.GenerateTestFloatHistogram(7)},
sample{20, 0, nil, tsdbutil.GenerateTestFloatHistogram(5)},
sample{21, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, nil, tsdbutil.GenerateTestFloatHistogram(8)},
sample{8, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(9))},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{12, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
sample{15, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
sample{16, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{17, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{18, 0, nil, tsdbutil.GenerateTestFloatHistogram(6)},
sample{19, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(7))},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{20, 0, nil, tsdbutil.SetFloatHistogramCounterReset(tsdbutil.GenerateTestFloatHistogram(5))},
sample{21, 0, nil, tsdbutil.SetFloatHistogramNotCounterReset(tsdbutil.GenerateTestFloatHistogram(6))},
}),
},
expectedMinMaxTimes: []minMaxTimes{
{7, 8},
{12, 16},
{17, 17},
{18, 19},
{20, 21},
},

// Skipping chunk test - can't create a single chunk for each
// sample slice since there are counter resets in the middle of
// the slices.
skipChunkTest: true,
},
{
// This case won't actually happen until OOO native histograms is implemented.
// Issue: https://github.com/prometheus/prometheus/issues/11220.
name: "iterables with mixed encodings and counter resets",
samples: [][]chunks.Sample{
{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
sample{12, 13, nil, nil},
sample{13, 14, nil, nil},
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
// Counter reset should be detected when chunks are created from the iterable.
sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
},
{
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{19, 45, nil, nil},
},
},

expected: []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
sample{12, 13, nil, nil},
sample{13, 14, nil, nil},
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{15, 0, tsdbutil.GenerateTestHistogram(7), nil},
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
sample{19, 45, nil, nil},
},
expectedChks: []chunks.Meta{
assureChunkFromSamples(t, []chunks.Sample{
sample{7, 0, tsdbutil.GenerateTestHistogram(8), nil},
sample{8, 0, tsdbutil.GenerateTestHistogram(9), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{9, 0, nil, tsdbutil.GenerateTestFloatHistogram(10)},
sample{10, 0, nil, tsdbutil.GenerateTestFloatHistogram(11)},
sample{11, 0, nil, tsdbutil.GenerateTestFloatHistogram(12)},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{12, 13, nil, nil},
sample{13, 14, nil, nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{14, 0, tsdbutil.GenerateTestHistogram(8), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{15, 0, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(7)), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{18, 0, tsdbutil.GenerateTestHistogram(6), nil},
}),
assureChunkFromSamples(t, []chunks.Sample{
sample{19, 45, nil, nil},
}),
},
expectedMinMaxTimes: []minMaxTimes{
{7, 8},
{9, 11},
{12, 13},
{14, 14},
{15, 15},
{18, 18},
{19, 19},
},

skipChunkTest: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Run("sample", func(t *testing.T) {
f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
var f *fakeChunksReader
var chkMetas []chunks.Meta
// If the test case wants to skip the chunks test, it probably
// means you can't create valid chunks from sample slices,
// therefore create iterables over the samples instead.
if tc.skipChunkTest {
f, chkMetas = createFakeReaderAndIterables(tc.samples...)
} else {
f, chkMetas = createFakeReaderAndNotPopulatedChunks(tc.samples...)
}
it := &populateWithDelSeriesIterator{}
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)

@ -1433,7 +1778,35 @@ func TestPopulateWithTombSeriesIterators(t *testing.T) {

require.Equal(t, tc.expected, r)
})
t.Run("chunk", func(t *testing.T) {
f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.chks...)
if tc.skipChunkTest {
t.Skip()
}
f, chkMetas := createFakeReaderAndNotPopulatedChunks(tc.samples...)
it := &populateWithDelChunkSeriesIterator{}
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)

if tc.seek != 0 {
// Chunk iterator does not have Seek method.
return
}
expandedResult, err := storage.ExpandChunks(it)
require.NoError(t, err)

// We don't care about ref IDs for comparison, only chunk's samples matters.
rmChunkRefs(expandedResult)
rmChunkRefs(tc.expectedChks)
require.Equal(t, tc.expectedChks, expandedResult)

for i, meta := range expandedResult {
require.Equal(t, tc.expectedMinMaxTimes[i].minTime, meta.MinTime)
require.Equal(t, tc.expectedMinMaxTimes[i].maxTime, meta.MaxTime)
}
})
t.Run("iterables", func(t *testing.T) {
if tc.skipIterableTest {
t.Skip()
}
f, chkMetas := createFakeReaderAndIterables(tc.samples...)
it := &populateWithDelChunkSeriesIterator{}
it.reset(ulid.ULID{}, f, chkMetas, tc.intervals)

@ -1726,13 +2099,13 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
type mockChunkReader map[chunks.ChunkRef]chunkenc.Chunk

func (cr mockChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
func (cr mockChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
chk, ok := cr[meta.Ref]
if ok {
return chk, nil
return chk, nil, nil
}

return nil, errors.New("Chunk with ref not found")
return nil, nil, errors.New("Chunk with ref not found")
}

func (cr mockChunkReader) Close() error {

@ -3047,7 +3420,7 @@ func TestBlockBaseSeriesSet(t *testing.T) {

idx := tc.expIdxs[i]

require.Equal(t, tc.series[idx].lset, bcs.curr.labels)
require.Equal(t, tc.series[idx].chunks, si.chks)
require.Equal(t, tc.series[idx].chunks, si.metas)

i++
}
@ -116,7 +116,17 @@ func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram {

return h
}

func SetHistogramCounterReset(h *histogram.Histogram) *histogram.Histogram {
h.CounterResetHint = histogram.CounterReset
return h
}

func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.NotCounterReset
return h
}

func SetFloatHistogramCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram {
h.CounterResetHint = histogram.CounterReset
return h
}
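These helpers only stamp the CounterResetHint field, which is what the iterable tests above assert on: a chunk cut on a counter reset starts with a CounterReset hint, and subsequent samples in a chunk carry NotCounterReset. A short illustration (assuming the tsdbutil and model/histogram packages):

h := tsdbutil.GenerateTestHistogram(5)
h = tsdbutil.SetHistogramCounterReset(h)
// h.CounterResetHint == histogram.CounterReset

fh := tsdbutil.GenerateTestFloatHistogram(6)
fh = tsdbutil.SetFloatHistogramNotCounterReset(fh)
// fh.CounterResetHint == histogram.NotCounterReset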
16
web/ui/package-lock.json
generated
@ -18028,9 +18028,9 @@

"dev": true
},
"node_modules/sanitize-html": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz",
"integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==",
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.11.0.tgz",
"integrity": "sha512-BG68EDHRaGKqlsNjJ2xUB7gpInPA8gVx/mvjO743hZaeMCZ2DwzW7xvsqZ+KNU4QKwj86HJ3uu2liISf2qBBUA==",
"dependencies": {
"deepmerge": "^4.2.2",
"escape-string-regexp": "^4.0.0",

@ -20800,7 +20800,7 @@

"react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.10.0",
"sanitize-html": "^2.11.0",
"sass": "1.62.1",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"

@ -23455,7 +23455,7 @@

"react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.10.0",
"sanitize-html": "^2.11.0",
"sass": "1.62.1",
"sinon": "^14.0.2",
"tempusdominus-bootstrap-4": "^5.39.2",

@ -34297,9 +34297,9 @@

"dev": true
},
"sanitize-html": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.10.0.tgz",
"integrity": "sha512-JqdovUd81dG4k87vZt6uA6YhDfWkUGruUu/aPmXLxXi45gZExnt9Bnw/qeQU8oGf82vPyaE0vO4aH0PbobB9JQ==",
"version": "2.11.0",
"resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.11.0.tgz",
"integrity": "sha512-BG68EDHRaGKqlsNjJ2xUB7gpInPA8gVx/mvjO743hZaeMCZ2DwzW7xvsqZ+KNU4QKwj86HJ3uu2liISf2qBBUA==",
"requires": {
"deepmerge": "^4.2.2",
"escape-string-regexp": "^4.0.0",

@ -37,7 +37,7 @@

"react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.10.0",
"sanitize-html": "^2.11.0",
"sass": "1.62.1",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"