Merge pull request #12928 from alexandear/ci-enable-godot

ci(lint): enable godot; append dot at the end of comments
This commit is contained in:
Björn Rabenstein 2023-11-01 17:15:41 +01:00 committed by GitHub
commit a43669e611
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
54 changed files with 157 additions and 150 deletions

View file

@@ -15,6 +15,7 @@ linters:
- depguard - depguard
- errorlint - errorlint
- gocritic - gocritic
- godot
- gofumpt - gofumpt
- goimports - goimports
- misspell - misspell
@@ -45,6 +46,9 @@ issues:
- path: web/ - path: web/
linters: linters:
- errorlint - errorlint
- linters:
- godot
source: "^// ==="
linters-settings: linters-settings:
depguard: depguard:

View file

@@ -1283,7 +1283,7 @@ func startsOrEndsWithQuote(s string) bool {
strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'") strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'")
} }
// compileCORSRegexString compiles given string and adds anchors // compileCORSRegexString compiles given string and adds anchors.
func compileCORSRegexString(s string) (*regexp.Regexp, error) { func compileCORSRegexString(s string) (*regexp.Regexp, error) {
r, err := relabel.NewRegexp(s) r, err := relabel.NewRegexp(s)
if err != nil { if err != nil {

View file

@@ -281,7 +281,7 @@ func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azco
return credential, nil return credential, nil
} }
// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS) // virtualMachine represents an Azure virtual machine (which can also be created by a VMSS).
type virtualMachine struct { type virtualMachine struct {
ID string ID string
Name string Name string

View file

@@ -50,7 +50,7 @@ const (
tagsLabel = model.MetaLabelPrefix + "consul_tags" tagsLabel = model.MetaLabelPrefix + "consul_tags"
// serviceLabel is the name of the label containing the service name. // serviceLabel is the name of the label containing the service name.
serviceLabel = model.MetaLabelPrefix + "consul_service" serviceLabel = model.MetaLabelPrefix + "consul_service"
// healthLabel is the name of the label containing the health of the service instance // healthLabel is the name of the label containing the health of the service instance.
healthLabel = model.MetaLabelPrefix + "consul_health" healthLabel = model.MetaLabelPrefix + "consul_health"
// serviceAddressLabel is the name of the label containing the (optional) service address. // serviceAddressLabel is the name of the label containing the (optional) service address.
serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address" serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"

View file

@@ -21,7 +21,7 @@ import (
"testing" "testing"
) )
// SDMock is the interface for the DigitalOcean mock // SDMock is the interface for the DigitalOcean mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server
@@ -35,18 +35,18 @@ func NewSDMock(t *testing.T) *SDMock {
} }
} }
// Endpoint returns the URI to the mock server // Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string { func (m *SDMock) Endpoint() string {
return m.Server.URL + "/" return m.Server.URL + "/"
} }
// Setup creates the mock server // Setup creates the mock server.
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)
} }
// ShutdownServer creates the mock server // ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() { func (m *SDMock) ShutdownServer() {
m.Server.Close() m.Server.Close()
} }

View file

@@ -20,7 +20,7 @@ import (
"testing" "testing"
) )
// SDMock is the interface for the Hetzner Cloud mock // SDMock is the interface for the Hetzner Cloud mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server
@@ -34,19 +34,19 @@ func NewSDMock(t *testing.T) *SDMock {
} }
} }
// Endpoint returns the URI to the mock server // Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string { func (m *SDMock) Endpoint() string {
return m.Server.URL + "/" return m.Server.URL + "/"
} }
// Setup creates the mock server // Setup creates the mock server.
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)
m.t.Cleanup(m.Server.Close) m.t.Cleanup(m.Server.Close)
} }
// ShutdownServer creates the mock server // ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() { func (m *SDMock) ShutdownServer() {
m.Server.Close() m.Server.Close()
} }

View file

@@ -45,7 +45,7 @@ var (
[]string{"endpoint"}, []string{"endpoint"},
) )
// Definition of metrics for client-go workflow metrics provider // Definition of metrics for client-go workflow metrics provider.
clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec( clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: workqueueMetricsNamespace, Namespace: workqueueMetricsNamespace,
@@ -106,7 +106,7 @@ func (noopMetric) Dec() {}
func (noopMetric) Observe(float64) {} func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64) {} func (noopMetric) Set(float64) {}
// Definition of client-go metrics adapters for HTTP requests observation // Definition of client-go metrics adapters for HTTP requests observation.
type clientGoRequestMetricAdapter struct{} type clientGoRequestMetricAdapter struct{}
func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) { func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
@@ -130,7 +130,7 @@ func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.U
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
} }
// Definition of client-go workqueue metrics provider definition // Definition of client-go workqueue metrics provider definition.
type clientGoWorkqueueMetricsProvider struct{} type clientGoWorkqueueMetricsProvider struct{}
func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) { func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) {

View file

@@ -20,7 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions // endpointSliceAdaptor is an adaptor for the different EndpointSlice versions.
type endpointSliceAdaptor interface { type endpointSliceAdaptor interface {
get() interface{} get() interface{}
getObjectMeta() metav1.ObjectMeta getObjectMeta() metav1.ObjectMeta
@@ -55,7 +55,7 @@ type endpointSliceEndpointConditionsAdaptor interface {
terminating() *bool terminating() *bool
} }
// Adaptor for k8s.io/api/discovery/v1 // Adaptor for k8s.io/api/discovery/v1.
type endpointSliceAdaptorV1 struct { type endpointSliceAdaptorV1 struct {
endpointSlice *v1.EndpointSlice endpointSlice *v1.EndpointSlice
} }
@@ -108,7 +108,7 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
return v1.LabelServiceName return v1.LabelServiceName
} }
// Adaptor for k8s.io/api/discovery/v1beta1 // Adaptor for k8s.io/api/discovery/v1beta1.
type endpointSliceAdaptorV1Beta1 struct { type endpointSliceAdaptorV1Beta1 struct {
endpointSlice *v1beta1.EndpointSlice endpointSlice *v1beta1.EndpointSlice
} }

View file

@@ -19,7 +19,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
// ingressAdaptor is an adaptor for the different Ingress versions // ingressAdaptor is an adaptor for the different Ingress versions.
type ingressAdaptor interface { type ingressAdaptor interface {
getObjectMeta() metav1.ObjectMeta getObjectMeta() metav1.ObjectMeta
name() string name() string
@@ -36,7 +36,7 @@ type ingressRuleAdaptor interface {
host() string host() string
} }
// Adaptor for networking.k8s.io/v1 // Adaptor for networking.k8s.io/v1.
type ingressAdaptorV1 struct { type ingressAdaptorV1 struct {
ingress *v1.Ingress ingress *v1.Ingress
} }
@@ -90,7 +90,7 @@ func (i *ingressRuleAdaptorV1) paths() []string {
func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
// Adaptor for networking.k8s.io/v1beta1 // Adaptor for networking.k8s.io/v1beta1.
type ingressAdaptorV1Beta1 struct { type ingressAdaptorV1Beta1 struct {
ingress *v1beta1.Ingress ingress *v1beta1.Ingress
} }

View file

@@ -65,9 +65,9 @@ const (
) )
var ( var (
// Http header // Http header.
userAgent = fmt.Sprintf("Prometheus/%s", version.Version) userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
// Custom events metric // Custom events metric.
eventCount = prometheus.NewCounterVec( eventCount = prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: metricsNamespace, Namespace: metricsNamespace,
@@ -76,7 +76,7 @@ var (
}, },
[]string{"role", "event"}, []string{"role", "event"},
) )
// DefaultSDConfig is the default Kubernetes SD configuration // DefaultSDConfig is the default Kubernetes SD configuration.
DefaultSDConfig = SDConfig{ DefaultSDConfig = SDConfig{
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
} }

View file

@@ -202,7 +202,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
// 5. NodeLegacyHostIP // 5. NodeLegacyHostIP
// 6. NodeHostName // 6. NodeHostName
// //
// Derived from k8s.io/kubernetes/pkg/util/node/node.go // Derived from k8s.io/kubernetes/pkg/util/node/node.go.
func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) {
m := map[apiv1.NodeAddressType][]string{} m := map[apiv1.NodeAddressType][]string{}
for _, a := range node.Status.Addresses { for _, a := range node.Status.Addresses {

View file

@@ -137,7 +137,7 @@ type Manager struct {
triggerSend chan struct{} triggerSend chan struct{}
} }
// Run starts the background processing // Run starts the background processing.
func (m *Manager) Run() error { func (m *Manager) Run() error {
go m.sender() go m.sender()
<-m.ctx.Done() <-m.ctx.Done()

View file

@@ -20,7 +20,7 @@ import (
"testing" "testing"
) )
// SDMock is the interface for the Linode mock // SDMock is the interface for the Linode mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server
@@ -34,18 +34,18 @@ func NewSDMock(t *testing.T) *SDMock {
} }
} }
// Endpoint returns the URI to the mock server // Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string { func (m *SDMock) Endpoint() string {
return m.Server.URL + "/" return m.Server.URL + "/"
} }
// Setup creates the mock server // Setup creates the mock server.
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)
} }
// ShutdownServer creates the mock server // ShutdownServer creates the mock server.
func (m *SDMock) ShutdownServer() { func (m *SDMock) ShutdownServer() {
m.Server.Close() m.Server.Close()
} }

View file

@@ -92,7 +92,7 @@ type Provider struct {
newSubs map[string]struct{} newSubs map[string]struct{}
} }
// Discoverer return the Discoverer of the provider // Discoverer return the Discoverer of the provider.
func (p *Provider) Discoverer() Discoverer { func (p *Provider) Discoverer() Discoverer {
return p.d return p.d
} }

View file

@@ -48,7 +48,7 @@ const (
// imageLabel is the label that is used for the docker image running the service. // imageLabel is the label that is used for the docker image running the service.
imageLabel model.LabelName = metaLabelPrefix + "image" imageLabel model.LabelName = metaLabelPrefix + "image"
// portIndexLabel is the integer port index when multiple ports are defined; // portIndexLabel is the integer port index when multiple ports are defined;
// e.g. PORT1 would have a value of '1' // e.g. PORT1 would have a value of '1'.
portIndexLabel model.LabelName = metaLabelPrefix + "port_index" portIndexLabel model.LabelName = metaLabelPrefix + "port_index"
// taskLabel contains the mesos task name of the app instance. // taskLabel contains the mesos task name of the app instance.
taskLabel model.LabelName = metaLabelPrefix + "task" taskLabel model.LabelName = metaLabelPrefix + "task"

View file

@@ -29,7 +29,7 @@ import (
"github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/util/strutil"
) )
// SDMock is the interface for the DigitalOcean mock // SDMock is the interface for the DigitalOcean mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server
@@ -47,12 +47,12 @@ func NewSDMock(t *testing.T, directory string) *SDMock {
} }
} }
// Endpoint returns the URI to the mock server // Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string { func (m *SDMock) Endpoint() string {
return m.Server.URL + "/" return m.Server.URL + "/"
} }
// Setup creates the mock server // Setup creates the mock server.
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)

View file

@@ -30,7 +30,7 @@ type NomadSDTestSuite struct {
Mock *SDMock Mock *SDMock
} }
// SDMock is the interface for the nomad mock // SDMock is the interface for the nomad mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server

View file

@@ -20,7 +20,7 @@ import (
"testing" "testing"
) )
// SDMock is the interface for the OpenStack mock // SDMock is the interface for the OpenStack mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server
@@ -34,12 +34,12 @@ func NewSDMock(t *testing.T) *SDMock {
} }
} }
// Endpoint returns the URI to the mock server // Endpoint returns the URI to the mock server.
func (m *SDMock) Endpoint() string { func (m *SDMock) Endpoint() string {
return m.Server.URL + "/" return m.Server.URL + "/"
} }
// Setup creates the mock server // Setup creates the mock server.
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)
@@ -60,7 +60,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) {
} }
} }
// HandleVersionsSuccessfully mocks version call // HandleVersionsSuccessfully mocks version call.
func (m *SDMock) HandleVersionsSuccessfully() { func (m *SDMock) HandleVersionsSuccessfully() {
m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, ` fmt.Fprintf(w, `
@@ -88,7 +88,7 @@ func (m *SDMock) HandleVersionsSuccessfully() {
}) })
} }
// HandleAuthSuccessfully mocks auth call // HandleAuthSuccessfully mocks auth call.
func (m *SDMock) HandleAuthSuccessfully() { func (m *SDMock) HandleAuthSuccessfully() {
m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Subject-Token", tokenID) w.Header().Add("X-Subject-Token", tokenID)
@@ -236,7 +236,7 @@ const hypervisorListBody = `
] ]
}` }`
// HandleHypervisorListSuccessfully mocks os-hypervisors detail call // HandleHypervisorListSuccessfully mocks os-hypervisors detail call.
func (m *SDMock) HandleHypervisorListSuccessfully() { func (m *SDMock) HandleHypervisorListSuccessfully() {
m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, "GET")
@@ -533,7 +533,7 @@ const serverListBody = `
} }
` `
// HandleServerListSuccessfully mocks server detail call // HandleServerListSuccessfully mocks server detail call.
func (m *SDMock) HandleServerListSuccessfully() { func (m *SDMock) HandleServerListSuccessfully() {
m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, "GET")
@@ -572,7 +572,7 @@ const listOutput = `
} }
` `
// HandleFloatingIPListSuccessfully mocks floating ips call // HandleFloatingIPListSuccessfully mocks floating ips call.
func (m *SDMock) HandleFloatingIPListSuccessfully() { func (m *SDMock) HandleFloatingIPListSuccessfully() {
m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, "GET")

View file

@@ -20,7 +20,7 @@ import (
"testing" "testing"
) )
// SDMock is the interface for the Vultr mock // SDMock is the interface for the Vultr mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
Server *httptest.Server Server *httptest.Server

View file

@@ -31,7 +31,7 @@ type BucketCount interface {
// absolute counts directly). Go type parameters don't allow type // absolute counts directly). Go type parameters don't allow type
// specialization. Therefore, where special treatment of deltas between buckets // specialization. Therefore, where special treatment of deltas between buckets
// vs. absolute counts is important, this information has to be provided as a // vs. absolute counts is important, this information has to be provided as a
// separate boolean parameter "deltaBuckets" // separate boolean parameter "deltaBuckets".
type InternalBucketCount interface { type InternalBucketCount interface {
float64 | int64 float64 | int64
} }

View file

@@ -450,6 +450,7 @@ func TestLabels_Get(t *testing.T) {
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels. // The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here) // In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
//
// name old time/op new time/op delta // name old time/op new time/op delta
// Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1) // Labels_Get/with_5_labels/get_first_label 5.12ns ± 0% 14.24ns ± 0% ~ (p=1.000 n=1+1)
// Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1) // Labels_Get/with_5_labels/get_middle_label 13.5ns ± 0% 18.5ns ± 0% ~ (p=1.000 n=1+1)

View file

@@ -786,47 +786,47 @@ func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
return simpleFunc(vals, enh, math.Tan), nil return simpleFunc(vals, enh, math.Tan), nil
} }
// == asin(Vector parser.ValueTypeVector) (Vector, Annotations) === // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asin), nil return simpleFunc(vals, enh, math.Asin), nil
} }
// == acos(Vector parser.ValueTypeVector) (Vector, Annotations) === // === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acos), nil return simpleFunc(vals, enh, math.Acos), nil
} }
// == atan(Vector parser.ValueTypeVector) (Vector, Annotations) === // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atan), nil return simpleFunc(vals, enh, math.Atan), nil
} }
// == sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Sinh), nil return simpleFunc(vals, enh, math.Sinh), nil
} }
// == cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Cosh), nil return simpleFunc(vals, enh, math.Cosh), nil
} }
// == tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Tanh), nil return simpleFunc(vals, enh, math.Tanh), nil
} }
// == asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Asinh), nil return simpleFunc(vals, enh, math.Asinh), nil
} }
// == acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Acosh), nil return simpleFunc(vals, enh, math.Acosh), nil
} }
// == atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return simpleFunc(vals, enh, math.Atanh), nil return simpleFunc(vals, enh, math.Atanh), nil
} }

View file

@@ -59,11 +59,11 @@ func (i Item) Pretty(int) string { return i.String() }
func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd } func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd }
// IsAggregator returns true if the Item belongs to the aggregator functions. // IsAggregator returns true if the Item belongs to the aggregator functions.
// Returns false otherwise // Returns false otherwise.
func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd } func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter. // IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
// Returns false otherwise // Returns false otherwise.
func (i ItemType) IsAggregatorWithParam() bool { func (i ItemType) IsAggregatorWithParam() bool {
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
} }

View file

@@ -171,7 +171,7 @@ func ParseExpr(input string) (expr Expr, err error) {
return p.ParseExpr() return p.ParseExpr()
} }
// ParseMetric parses the input into a metric // ParseMetric parses the input into a metric.
func ParseMetric(input string) (m labels.Labels, err error) { func ParseMetric(input string) (m labels.Labels, err error) {
p := NewParser(input) p := NewParser(input)
defer p.Close() defer p.Close()

View file

@@ -472,7 +472,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
} }
// State returns the maximum state of alert instances for this rule. // State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive // StateFiring > StatePending > StateInactive.
func (r *AlertingRule) State() AlertState { func (r *AlertingRule) State() AlertState {
r.activeMtx.Lock() r.activeMtx.Lock()
defer r.activeMtx.Unlock() defer r.activeMtx.Unlock()

View file

@@ -250,7 +250,7 @@ type GroupLoader interface {
} }
// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile // FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
// and parser.ParseExpr // and parser.ParseExpr.
type FileLoader struct{} type FileLoader struct{}
func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {

View file

@@ -34,7 +34,7 @@ import (
"github.com/prometheus/prometheus/util/osutil" "github.com/prometheus/prometheus/util/osutil"
) )
// NewManager is the Manager constructor // NewManager is the Manager constructor.
func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
if o == nil { if o == nil {
o = &Options{} o = &Options{}

View file

@@ -112,7 +112,7 @@ type scrapeLoopOptions struct {
const maxAheadTime = 10 * time.Minute const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop" // returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels type labelsMutator func(labels.Labels) labels.Labels
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {

View file

@@ -196,7 +196,7 @@ func (t *Target) DiscoveredLabels() labels.Labels {
return t.discoveredLabels.Copy() return t.discoveredLabels.Copy()
} }
// SetDiscoveredLabels sets new DiscoveredLabels // SetDiscoveredLabels sets new DiscoveredLabels.
func (t *Target) SetDiscoveredLabels(l labels.Labels) { func (t *Target) SetDiscoveredLabels(l labels.Labels) {
t.mtx.Lock() t.mtx.Lock()
defer t.mtx.Unlock() defer t.mtx.Unlock()

View file

@@ -327,7 +327,7 @@ func (s testSeriesSet) At() Series { return s.series }
func (s testSeriesSet) Err() error { return nil } func (s testSeriesSet) Err() error { return nil }
func (s testSeriesSet) Warnings() annotations.Annotations { return nil } func (s testSeriesSet) Warnings() annotations.Annotations { return nil }
// TestSeriesSet returns a mock series set // TestSeriesSet returns a mock series set.
func TestSeriesSet(series Series) SeriesSet { func TestSeriesSet(series Series) SeriesSet {
return testSeriesSet{series: series} return testSeriesSet{series: series}
} }

View file

@@ -43,7 +43,7 @@ const (
IngestionPublicAudience = "https://monitor.azure.com//.default" IngestionPublicAudience = "https://monitor.azure.com//.default"
) )
// ManagedIdentityConfig is used to store managed identity config values // ManagedIdentityConfig is used to store managed identity config values.
type ManagedIdentityConfig struct { type ManagedIdentityConfig struct {
// ClientID is the clientId of the managed identity that is being used to authenticate. // ClientID is the clientId of the managed identity that is being used to authenticate.
ClientID string `yaml:"client_id,omitempty"` ClientID string `yaml:"client_id,omitempty"`
@@ -235,7 +235,7 @@ func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managed
return azidentity.NewManagedIdentityCredential(opts) return azidentity.NewManagedIdentityCredential(opts)
} }
// newOAuthTokenCredential returns new OAuth token credential // newOAuthTokenCredential returns new OAuth token credential.
func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) { func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) {
opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts} opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts}
return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts) return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
@@ -326,7 +326,7 @@ func getAudience(cloud string) (string, error) {
} }
} }
// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds // getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds.
func getCloudConfiguration(c string) (cloud.Configuration, error) { func getCloudConfiguration(c string) (cloud.Configuration, error) {
switch strings.ToLower(c) { switch strings.ToLower(c) {
case strings.ToLower(AzureChina): case strings.ToLower(AzureChina):

View file

@@ -475,7 +475,7 @@ func (c *concreteSeriesIterator) At() (t int64, v float64) {
return s.Timestamp, s.Value return s.Timestamp, s.Value
} }
// AtHistogram implements chunkenc.Iterator // AtHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
if c.curValType != chunkenc.ValHistogram { if c.curValType != chunkenc.ValHistogram {
panic("iterator is not on an integer histogram sample") panic("iterator is not on an integer histogram sample")
@@ -484,7 +484,7 @@ func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
return h.Timestamp, HistogramProtoToHistogram(h) return h.Timestamp, HistogramProtoToHistogram(h)
} }
// AtFloatHistogram implements chunkenc.Iterator // AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
switch c.curValType { switch c.curValType {
case chunkenc.ValHistogram: case chunkenc.ValHistogram:
@ -547,7 +547,7 @@ func (c *concreteSeriesIterator) Err() error {
} }
// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read,
// also making sure that there are no labels with duplicate names // also making sure that there are no labels with duplicate names.
func validateLabelsAndMetricName(ls []prompb.Label) error { func validateLabelsAndMetricName(ls []prompb.Label) error {
for i, l := range ls { for i, l := range ls {
if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
@ -752,7 +752,7 @@ func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
return spans return spans
} }
// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric.
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
metric := make(model.Metric, len(labelPairs)) metric := make(model.Metric, len(labelPairs))
for _, l := range labelPairs { for _, l := range labelPairs {

View file

@ -8,13 +8,13 @@ import (
"unicode" "unicode"
) )
// Normalizes the specified label to follow Prometheus label names standard // Normalizes the specified label to follow Prometheus label names standard.
// //
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
// //
// Labels that start with non-letter rune will be prefixed with "key_" // Labels that start with non-letter rune will be prefixed with "key_".
// //
// Exception is made for double-underscores which are allowed // Exception is made for double-underscores which are allowed.
func NormalizeLabel(label string) string { func NormalizeLabel(label string) string {
// Trivial case // Trivial case
if len(label) == 0 { if len(label) == 0 {
@ -32,7 +32,7 @@ func NormalizeLabel(label string) string {
return label return label
} }
// Return '_' for anything non-alphanumeric // Return '_' for anything non-alphanumeric.
func sanitizeRune(r rune) rune { func sanitizeRune(r rune) rune {
if unicode.IsLetter(r) || unicode.IsDigit(r) { if unicode.IsLetter(r) || unicode.IsDigit(r) {
return r return r

View file

@ -10,7 +10,7 @@ import (
"go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric"
) )
// The map to translate OTLP units to Prometheus units // The map to translate OTLP units to Prometheus units.
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html // OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) // (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
@ -57,8 +57,8 @@ var unitMap = map[string]string{
"$": "dollars", "$": "dollars",
} }
// The map that translates the "per" unit // The map that translates the "per" unit.
// Example: s => per second (singular) // Example: s => per second (singular).
var perUnitMap = map[string]string{ var perUnitMap = map[string]string{
"s": "second", "s": "second",
"m": "minute", "m": "minute",
@ -69,7 +69,7 @@ var perUnitMap = map[string]string{
"y": "year", "y": "year",
} }
// Build a Prometheus-compliant metric name for the specified metric // Build a Prometheus-compliant metric name for the specified metric.
// //
// Metric name is prefixed with specified namespace and underscore (if any). // Metric name is prefixed with specified namespace and underscore (if any).
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus // Namespace is not cleaned up. Make sure specified namespace follows Prometheus
@ -202,7 +202,7 @@ func removeSuffix(tokens []string, suffix string) []string {
return tokens return tokens
} }
// Clean up specified string so it's Prometheus compliant // Clean up specified string so it's Prometheus compliant.
func CleanUpString(s string) string { func CleanUpString(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_")
} }
@ -211,8 +211,8 @@ func RemovePromForbiddenRunes(s string) string {
return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_")
} }
// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit.
// Returns the specified unit if not found in unitMap // Returns the specified unit if not found in unitMap.
func unitMapGetOrDefault(unit string) string { func unitMapGetOrDefault(unit string) string {
if promUnit, ok := unitMap[unit]; ok { if promUnit, ok := unitMap[unit]; ok {
return promUnit return promUnit
@ -220,8 +220,8 @@ func unitMapGetOrDefault(unit string) string {
return unit return unit
} }
// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit // Retrieve the Prometheus "per" unit corresponding to the specified "per" unit.
// Returns the specified unit if not found in perUnitMap // Returns the specified unit if not found in perUnitMap.
func perUnitMapGetOrDefault(perUnit string) string { func perUnitMapGetOrDefault(perUnit string) string {
if promPerUnit, ok := perUnitMap[perUnit]; ok { if promPerUnit, ok := perUnitMap[perUnit]; ok {
return promPerUnit return promPerUnit
@ -229,7 +229,7 @@ func perUnitMapGetOrDefault(perUnit string) string {
return perUnit return perUnit
} }
// Returns whether the slice contains the specified value // Returns whether the slice contains the specified value.
func contains(slice []string, value string) bool { func contains(slice []string, value string) bool {
for _, sliceEntry := range slice { for _, sliceEntry := range slice {
if sliceEntry == value { if sliceEntry == value {
@ -239,7 +239,7 @@ func contains(slice []string, value string) bool {
return false return false
} }
// Remove the specified value from the slice // Remove the specified value from the slice.
func removeItem(slice []string, value string) []string { func removeItem(slice []string, value string) []string {
newSlice := make([]string, 0, len(slice)) newSlice := make([]string, 0, len(slice))
for _, sliceEntry := range slice { for _, sliceEntry := range slice {

View file

@ -15,7 +15,7 @@ func init() {
ilm = resourceMetrics.ScopeMetrics().AppendEmpty() ilm = resourceMetrics.ScopeMetrics().AppendEmpty()
} }
// Returns a new Metric of type "Gauge" with specified name and unit // Returns a new Metric of type "Gauge" with specified name and unit.
func createGauge(name, unit string) pmetric.Metric { func createGauge(name, unit string) pmetric.Metric {
gauge := ilm.Metrics().AppendEmpty() gauge := ilm.Metrics().AppendEmpty()
gauge.SetName(name) gauge.SetName(name)
@ -24,7 +24,7 @@ func createGauge(name, unit string) pmetric.Metric {
return gauge return gauge
} }
// Returns a new Metric of type Monotonic Sum with specified name and unit // Returns a new Metric of type Monotonic Sum with specified name and unit.
func createCounter(name, unit string) pmetric.Metric { func createCounter(name, unit string) pmetric.Metric {
counter := ilm.Metrics().AppendEmpty() counter := ilm.Metrics().AppendEmpty()
counter.SetEmptySum().SetIsMonotonic(true) counter.SetEmptySum().SetIsMonotonic(true)

View file

@ -125,7 +125,7 @@ func TestIgnoreExternalLabels(t *testing.T) {
} }
// baseRemoteWriteConfig copy values from global Default Write config // baseRemoteWriteConfig copy values from global Default Write config
// to avoid change global state and cross impact test execution // to avoid change global state and cross impact test execution.
func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig { func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig {
cfg := config.DefaultRemoteWriteConfig cfg := config.DefaultRemoteWriteConfig
cfg.URL = &common_config.URL{ cfg.URL = &common_config.URL{
@ -137,7 +137,7 @@ func baseRemoteWriteConfig(host string) *config.RemoteWriteConfig {
} }
// baseRemoteReadConfig copy values from global Default Read config // baseRemoteReadConfig copy values from global Default Read config
// to avoid change global state and cross impact test execution // to avoid change global state and cross impact test execution.
func baseRemoteReadConfig(host string) *config.RemoteReadConfig { func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
cfg := config.DefaultRemoteReadConfig cfg := config.DefaultRemoteReadConfig
cfg.URL = &common_config.URL{ cfg.URL = &common_config.URL{

View file

@ -284,7 +284,7 @@ loop:
// cover an entirely different set of buckets. The function returns the // cover an entirely different set of buckets. The function returns the
// “forward” inserts to expand 'a' to also cover all the buckets exclusively // “forward” inserts to expand 'a' to also cover all the buckets exclusively
// covered by 'b', and it returns the “backward” inserts to expand 'b' to also // covered by 'b', and it returns the “backward” inserts to expand 'b' to also
// cover all the buckets exclusively covered by 'a' // cover all the buckets exclusively covered by 'a'.
func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) { func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) {
ai := newBucketIterator(a) ai := newBucketIterator(a)
bi := newBucketIterator(b) bi := newBucketIterator(b)

View file

@ -93,6 +93,7 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
// //
// Example: // Example:
// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9]. // assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
//
// | HeadChunkID value | refers to ... | // | HeadChunkID value | refers to ... |
// |-------------------|----------------------------------------------------------------------------------------| // |-------------------|----------------------------------------------------------------------------------------|
// | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head | // | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head |
@ -198,7 +199,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
}, nil }, nil
} }
// PopulatedChunk creates a chunk populated with samples every second starting at minTime // PopulatedChunk creates a chunk populated with samples every second starting at minTime.
func PopulatedChunk(numSamples int, minTime int64) (Meta, error) { func PopulatedChunk(numSamples int, minTime int64) (Meta, error) {
samples := make([]Sample, numSamples) samples := make([]Sample, numSamples)
for i := 0; i < numSamples; i++ { for i := 0; i < numSamples; i++ {

View file

@ -4404,7 +4404,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
// TestOOOCompactionWithDisabledWriteLog tests the scenario where the TSDB is // TestOOOCompactionWithDisabledWriteLog tests the scenario where the TSDB is
// configured to not have wal and wbl but its able to compact both the in-order // configured to not have wal and wbl but its able to compact both the in-order
// and out-of-order head // and out-of-order head.
func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
ctx := context.Background() ctx := context.Background()

View file

@ -1527,7 +1527,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
return actualInOrderMint, minOOOTime, minMmapFile return actualInOrderMint, minOOOTime, minMmapFile
} }
// Tombstones returns a new reader over the head's tombstones // Tombstones returns a new reader over the head's tombstones.
func (h *Head) Tombstones() (tombstones.Reader, error) { func (h *Head) Tombstones() (tombstones.Reader, error) {
return h.tombstones, nil return h.tombstones, nil
} }
@ -2171,7 +2171,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
return mint1 <= maxt2 && mint2 <= maxt1 return mint1 <= maxt2 && mint2 <= maxt1
} }
// mmappedChunk describes a head chunk on disk that has been mmapped // mmappedChunk describes a head chunk on disk that has been mmapped.
type mmappedChunk struct { type mmappedChunk struct {
ref chunks.ChunkDiskMapperRef ref chunks.ChunkDiskMapperRef
numSamples uint16 numSamples uint16

View file

@ -202,7 +202,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
// headChunkID returns the HeadChunkID referred to by the given position. // headChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos] // * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list // * pos >= len(s.mmappedChunks) refers to s.headChunks linked list.
func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID {
return chunks.HeadChunkID(pos) + s.firstChunkID return chunks.HeadChunkID(pos) + s.firstChunkID
} }
@ -596,7 +596,7 @@ var _ chunkenc.Chunk = &boundedChunk{}
// boundedChunk is an implementation of chunkenc.Chunk that uses a // boundedChunk is an implementation of chunkenc.Chunk that uses a
// boundedIterator that only iterates through samples which timestamps are // boundedIterator that only iterates through samples which timestamps are
// >= minT and <= maxT // >= minT and <= maxT.
type boundedChunk struct { type boundedChunk struct {
chunkenc.Chunk chunkenc.Chunk
minT int64 minT int64
@ -625,7 +625,7 @@ func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
var _ chunkenc.Iterator = &boundedIterator{} var _ chunkenc.Iterator = &boundedIterator{}
// boundedIterator is an implementation of Iterator that only iterates through // boundedIterator is an implementation of Iterator that only iterates through
// samples which timestamps are >= minT and <= maxT // samples which timestamps are >= minT and <= maxT.
type boundedIterator struct { type boundedIterator struct {
chunkenc.Iterator chunkenc.Iterator
minT int64 minT int64
@ -671,7 +671,7 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
return b.Iterator.Seek(t) return b.Iterator.Seek(t)
} }
// safeHeadChunk makes sure that the chunk can be accessed without a race condition // safeHeadChunk makes sure that the chunk can be accessed without a race condition.
type safeHeadChunk struct { type safeHeadChunk struct {
chunkenc.Chunk chunkenc.Chunk
s *memSeries s *memSeries

View file

@ -1468,7 +1468,7 @@ func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers ..
// LabelValues returns value tuples that exist for the given label name. // LabelValues returns value tuples that exist for the given label name.
// It is not safe to use the return value beyond the lifetime of the byte slice // It is not safe to use the return value beyond the lifetime of the byte slice
// passed into the Reader. // passed into the Reader.
// TODO(replay): Support filtering by matchers // TODO(replay): Support filtering by matchers.
func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) > 0 { if len(matchers) > 0 {
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
@ -1729,7 +1729,7 @@ func (r *Reader) Size() int64 {
} }
// LabelNames returns all the unique label names present in the index. // LabelNames returns all the unique label names present in the index.
// TODO(twilkie) implement support for matchers // TODO(twilkie) implement support for matchers.
func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) { func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) > 0 { if len(matchers) > 0 {
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)

View file

@ -39,7 +39,7 @@ type chunkInterval struct {
maxt int64 maxt int64
} }
// permutateChunkIntervals returns all possible orders of the given chunkIntervals // permutateChunkIntervals returns all possible orders of the given chunkIntervals.
func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval { func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval {
if left == right { if left == right {
inCopy := make([]chunkInterval, len(in)) inCopy := make([]chunkInterval, len(in))

View file

@ -21,7 +21,7 @@ import (
const testMaxSize int = 32 const testMaxSize int = 32
// Formulas chosen to make testing easy: // Formulas chosen to make testing easy.
func valEven(pos int) int { return pos*2 + 2 } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values func valEven(pos int) int { return pos*2 + 2 } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int { return pos*2 + 1 } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals. func valOdd(pos int) int { return pos*2 + 1 } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals.

View file

@ -100,8 +100,8 @@ type seriesSamples struct {
chunks [][]sample chunks [][]sample
} }
// Index: labels -> postings -> chunkMetas -> chunkRef // Index: labels -> postings -> chunkMetas -> chunkRef.
// ChunkReader: ref -> vals // ChunkReader: ref -> vals.
func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) { func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkReader, int64, int64) {
sort.Slice(tc, func(i, j int) bool { sort.Slice(tc, func(i, j int) bool {
return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0 return labels.Compare(labels.FromMap(tc[i].lset), labels.FromMap(tc[i].lset)) < 0

View file

@ -174,7 +174,7 @@ func (r *LiveReader) Record() []byte {
// Rebuild a full record from potentially partial records. Returns false // Rebuild a full record from potentially partial records. Returns false
// if there was an error or if we weren't able to read a record for any reason. // if there was an error or if we weren't able to read a record for any reason.
// Returns true if we read a full record. Any record data is appended to // Returns true if we read a full record. Any record data is appended to
// LiveReader.rec // LiveReader.rec.
func (r *LiveReader) buildRecord() (bool, error) { func (r *LiveReader) buildRecord() (bool, error) {
for { for {
// Check that we have data in the internal buffer to read. // Check that we have data in the internal buffer to read.

View file

@ -622,6 +622,7 @@ func (w *WL) flushPage(clear bool) error {
} }
// First Byte of header format: // First Byte of header format:
//
// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ] // [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ]
const ( const (
snappyMask = 1 << 3 snappyMask = 1 << 3
@ -836,7 +837,7 @@ func (w *WL) fsync(f *Segment) error {
// Sync forces a file sync on the current write log segment. This function is meant // Sync forces a file sync on the current write log segment. This function is meant
// to be used only on tests due to different behaviour on Operating Systems // to be used only on tests due to different behaviour on Operating Systems
// like windows and linux // like windows and linux.
func (w *WL) Sync() error { func (w *WL) Sync() error {
return w.fsync(w.segment) return w.fsync(w.segment)
} }

View file

@ -21,7 +21,7 @@ import (
"syscall" "syscall"
) )
// Statfs returns the file system type (Unix only) // Statfs returns the file system type (Unix only).
func Statfs(path string) string { func Statfs(path string) string {
// Types of file systems that may be returned by `statfs` // Types of file systems that may be returned by `statfs`
fsTypes := map[int64]string{ fsTypes := map[int64]string{

View file

@ -15,18 +15,18 @@ package testutil
import "time" import "time"
// A MockContext provides a simple stub implementation of a Context // A MockContext provides a simple stub implementation of a Context.
type MockContext struct { type MockContext struct {
Error error Error error
DoneCh chan struct{} DoneCh chan struct{}
} }
// Deadline always will return not set // Deadline always will return not set.
func (c *MockContext) Deadline() (deadline time.Time, ok bool) { func (c *MockContext) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false return time.Time{}, false
} }
// Done returns a read channel for listening to the Done event // Done returns a read channel for listening to the Done event.
func (c *MockContext) Done() <-chan struct{} { func (c *MockContext) Done() <-chan struct{} {
return c.DoneCh return c.DoneCh
} }
@ -36,7 +36,7 @@ func (c *MockContext) Err() error {
return c.Error return c.Error
} }
// Value ignores the Value and always returns nil // Value ignores the Value and always returns nil.
func (c *MockContext) Value(interface{}) interface{} { func (c *MockContext) Value(interface{}) interface{} {
return nil return nil
} }

View file

@ -33,7 +33,7 @@ const (
// NilCloser is a no-op Closer. // NilCloser is a no-op Closer.
NilCloser = nilCloser(true) NilCloser = nilCloser(true)
// The number of times that a TemporaryDirectory will retry its removal // The number of times that a TemporaryDirectory will retry its removal.
temporaryDirectoryRemoveRetries = 2 temporaryDirectoryRemoveRetries = 2
) )

View file

@ -1294,12 +1294,12 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult {
return apiFuncResult{res, nil, nil, nil} return apiFuncResult{res, nil, nil, nil}
} }
// RuleDiscovery has info for all rules // RuleDiscovery has info for all rules.
type RuleDiscovery struct { type RuleDiscovery struct {
RuleGroups []*RuleGroup `json:"groups"` RuleGroups []*RuleGroup `json:"groups"`
} }
// RuleGroup has info for rules which are part of a group // RuleGroup has info for rules which are part of a group.
type RuleGroup struct { type RuleGroup struct {
Name string `json:"name"` Name string `json:"name"`
File string `json:"file"` File string `json:"file"`

View file

@ -179,7 +179,7 @@ type LocalStorage interface {
api_v1.TSDBAdminStats api_v1.TSDBAdminStats
} }
// Handler serves various HTTP endpoints of the Prometheus server // Handler serves various HTTP endpoints of the Prometheus server.
type Handler struct { type Handler struct {
logger log.Logger logger log.Logger
@ -215,7 +215,7 @@ type Handler struct {
ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions. ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
} }
// ApplyConfig updates the config field of the Handler struct // ApplyConfig updates the config field of the Handler struct.
func (h *Handler) ApplyConfig(conf *config.Config) error { func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock() h.mtx.Lock()
defer h.mtx.Unlock() defer h.mtx.Unlock()
@ -522,7 +522,7 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
} }
} }
// SetReady sets the ready status of our web Handler // SetReady sets the ready status of our web Handler.
func (h *Handler) SetReady(v bool) { func (h *Handler) SetReady(v bool) {
if v { if v {
h.ready.Store(1) h.ready.Store(1)