Mirror of https://github.com/prometheus/prometheus.git
*: use latest release of staticcheck (#5057)
* *: use latest release of staticcheck

  It also fixes a couple of things in the code flagged by the additional checks.

  Signed-off-by: Simon Pasquier <spasquie@redhat.com>

* Use official release of staticcheck

  Also run 'go list' before staticcheck to avoid failures when downloading packages.

  Signed-off-by: Simon Pasquier <spasquie@redhat.com>
This commit is contained in:
parent b50ea4834f
commit f678e27eb6

Makefile (5 changed lines)
@@ -14,13 +14,8 @@
 include Makefile.common
 
 STATICCHECK_IGNORE = \
-  github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go:SA1019 \
-  github.com/prometheus/prometheus/discovery/kubernetes/node.go:SA1019 \
-  github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/main.go:SA1019 \
   github.com/prometheus/prometheus/pkg/textparse/promlex.l.go:SA4006 \
   github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go:SA4006 \
-  github.com/prometheus/prometheus/pkg/pool/pool.go:SA6002 \
-  github.com/prometheus/prometheus/promql/engine.go:SA6002 \
   github.com/prometheus/prometheus/prompb/rpc.pb.gw.go:SA1019
 
 DOCKER_IMAGE_NAME ?= prometheus

@@ -73,6 +73,8 @@ endif
 PROMU_VERSION ?= 0.2.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+STATICCHECK_VERSION ?= 2019.1
+STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
 
 PREFIX ?= $(shell pwd)
 BIN_DIR ?= $(shell pwd)

@@ -138,8 +140,12 @@ common-vet:
 .PHONY: common-staticcheck
 common-staticcheck: $(STATICCHECK)
     @echo ">> running staticcheck"
+    chmod +x $(STATICCHECK)
 ifdef GO111MODULE
-    GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
+    # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
+    # Otherwise staticcheck might fail randomly for some reason not yet explained.
+    GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+    GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
 else
     $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
 endif

@@ -200,21 +206,9 @@ proto:
     @echo ">> generating code from proto files"
     @./scripts/genproto.sh
 
-.PHONY: $(STATICCHECK)
 $(STATICCHECK):
-ifdef GO111MODULE
-    # Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
-    # See https://github.com/golang/go/issues/27643.
-    # For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
-    tmpModule=$$(mktemp -d 2>&1) && \
-    mkdir -p $${tmpModule}/staticcheck && \
-    cd "$${tmpModule}"/staticcheck && \
-    GO111MODULE=on $(GO) mod init example.com/staticcheck && \
-    GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
-    rm -rf $${tmpModule};
-else
-    GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
-endif
+    mkdir -p $(FIRST_GOPATH)/bin
+    curl -s -L $(STATICCHECK_URL) > $(STATICCHECK)
 
 ifdef GOVENDOR
 .PHONY: $(GOVENDOR)
@@ -22,12 +22,6 @@ import (
 
 const filePerm = 0644
 
-type archiver interface {
-    write(filename string, b []byte) error
-    close() error
-    filename() string
-}
-
 type tarGzFileWriter struct {
     tarWriter *tar.Writer
     gzWriter  *gzip.Writer

@@ -72,7 +66,3 @@ func (w *tarGzFileWriter) write(filename string, b []byte) error {
     }
     return nil
 }
-
-func (w *tarGzFileWriter) filename() string {
-    return w.file.Name()
-}
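Both hunks above drop code with no remaining callers (the archiver interface and the unused filename method), and several later hunks do the same in other packages. Dead, unexported identifiers like these are what staticcheck's unused analysis reports as U1000, presumably the check that prompted this cleanup once the run stopped being restricted to SA* checks. A minimal sketch with hypothetical names, not taken from the Prometheus code:

package main

import "fmt"

// helper is unexported and never called from anywhere in the package, so a
// staticcheck run with the unused checks enabled reports it as U1000
// ("func helper is unused"). The Go compiler itself does not complain.
func helper() string { return "never called" }

func main() {
    fmt.Println("only identifiers reachable from exported code or main count as used")
}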
@@ -24,11 +24,6 @@ import (
 
 const defaultTimeout = 2 * time.Minute
 
-type httpClient interface {
-    do(req *http.Request) (*http.Response, []byte, error)
-    urlJoin(path string) string
-}
-
 type prometheusHTTPClient struct {
     requestTimeout time.Duration
     httpClient     api.Client

@@ -84,7 +84,7 @@ func ruleUnitTest(filename string) []error {
     groupOrderMap := make(map[string]int)
     for i, gn := range unitTestInp.GroupEvalOrder {
         if _, ok := groupOrderMap[gn]; ok {
-            return []error{fmt.Errorf("Group name repeated in evaluation order: %s", gn)}
+            return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
         }
         groupOrderMap[gn] = i
     }
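This hunk and many of the following ones make the same small fix: error strings lose their leading capital letter. Go style, which staticcheck enforces as ST1005 ("error strings should not be capitalized"), prefers lowercase because errors are usually wrapped into longer messages by callers. A short sketch of the rule, using made-up names rather than Prometheus code:

package main

import (
    "errors"
    "fmt"
)

func validateName(name string) error {
    if name == "" {
        // ST1005 would flag: errors.New("Name must not be empty")
        return errors.New("name must not be empty")
    }
    return nil
}

func main() {
    // Lowercase error strings compose cleanly when wrapped by a caller:
    // "loading config: name must not be empty"
    fmt.Println(fmt.Errorf("loading config: %v", validateName("")))
}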
@@ -756,23 +756,23 @@ var expectedErrors = []struct {
     },
     {
         filename: "azure_client_id_missing.bad.yml",
-        errMsg:   "Azure SD configuration requires a client_id",
+        errMsg:   "azure SD configuration requires a client_id",
     },
     {
         filename: "azure_client_secret_missing.bad.yml",
-        errMsg:   "Azure SD configuration requires a client_secret",
+        errMsg:   "azure SD configuration requires a client_secret",
     },
     {
         filename: "azure_subscription_id_missing.bad.yml",
-        errMsg:   "Azure SD configuration requires a subscription_id",
+        errMsg:   "azure SD configuration requires a subscription_id",
     },
     {
         filename: "azure_tenant_id_missing.bad.yml",
-        errMsg:   "Azure SD configuration requires a tenant_id",
+        errMsg:   "azure SD configuration requires a tenant_id",
     },
     {
         filename: "azure_authentication_method.bad.yml",
-        errMsg:   "Unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
+        errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
     },
     {
         filename: "empty_scrape_config.bad.yml",
@@ -85,7 +85,7 @@ type SDConfig struct {
 
 func validateAuthParam(param, name string) error {
     if len(param) == 0 {
-        return fmt.Errorf("Azure SD configuration requires a %s", name)
+        return fmt.Errorf("azure SD configuration requires a %s", name)
     }
     return nil
 }

@@ -116,7 +116,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     }
 
     if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity {
-        return fmt.Errorf("Unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
+        return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
     }
 
     return nil

@@ -131,7 +131,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
         return err
     }
     if strings.TrimSpace(c.Server) == "" {
-        return fmt.Errorf("Consul SD configuration requires a server address")
+        return fmt.Errorf("consul SD configuration requires a server address")
     }
     return nil
 }

@@ -267,7 +267,7 @@ func (d *Discovery) getDatacenter() error {
 
     dc, ok := info["Config"]["Datacenter"].(string)
     if !ok {
-        err := fmt.Errorf("Invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
+        err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
         level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
         return err
     }
@@ -178,7 +178,7 @@ func convertToEndpoints(o interface{}) (*apiv1.Endpoints, error) {
         return endpoints, nil
     }
 
-    return nil, fmt.Errorf("Received unexpected object: %v", o)
+    return nil, fmt.Errorf("received unexpected object: %v", o)
 }
 
 func endpointsSource(ep *apiv1.Endpoints) string {

@@ -118,7 +118,7 @@ func convertToIngress(o interface{}) (*v1beta1.Ingress, error) {
         return ingress, nil
     }
 
-    return nil, fmt.Errorf("Received unexpected object: %v", o)
+    return nil, fmt.Errorf("received unexpected object: %v", o)
 }
 
 func ingressSource(s *v1beta1.Ingress) string {

@@ -80,7 +80,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
     case RoleNode, RolePod, RoleService, RoleEndpoint, RoleIngress:
         return nil
     default:
-        return fmt.Errorf("Unknown Kubernetes SD role %q", *c)
+        return fmt.Errorf("unknown Kubernetes SD role %q", *c)
     }
 }
 

@@ -127,7 +127,7 @@ func convertToNode(o interface{}) (*apiv1.Node, error) {
         return node, nil
     }
 
-    return nil, fmt.Errorf("Received unexpected object: %v", o)
+    return nil, fmt.Errorf("received unexpected object: %v", o)
 }
 
 func nodeSource(n *apiv1.Node) string {

@@ -131,7 +131,7 @@ func convertToPod(o interface{}) (*apiv1.Pod, error) {
         return pod, nil
     }
 
-    return nil, fmt.Errorf("Received unexpected object: %v", o)
+    return nil, fmt.Errorf("received unexpected object: %v", o)
 }
 
 const (

@@ -123,7 +123,7 @@ func convertToService(o interface{}) (*apiv1.Service, error) {
     if ok {
         return service, nil
     }
-    return nil, fmt.Errorf("Received unexpected object: %v", o)
+    return nil, fmt.Errorf("received unexpected object: %v", o)
 }
 
 func serviceSource(s *apiv1.Service) string {

@@ -356,7 +356,7 @@ func fetchApps(client *http.Client, url string) (*AppList, error) {
     defer resp.Body.Close()
 
     if (resp.StatusCode < 200) || (resp.StatusCode >= 300) {
-        return nil, fmt.Errorf("Non 2xx status '%v' response during marathon service discovery", resp.StatusCode)
+        return nil, fmt.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode)
     }
 
     var apps AppList

@@ -104,7 +104,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
         return fmt.Errorf("role missing (one of: instance, hypervisor)")
     }
     if c.Region == "" {
-        return fmt.Errorf("Openstack SD configuration requires a region")
+        return fmt.Errorf("openstack SD configuration requires a region")
     }
     return nil
 }
@@ -41,7 +41,6 @@ const (
     tritonLabelMachineBrand = tritonLabel + "machine_brand"
     tritonLabelMachineImage = tritonLabel + "machine_image"
     tritonLabelServerID     = tritonLabel + "server_id"
-    namespace               = "prometheus"
 )
 
 var (

@@ -84,16 +83,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
         return err
     }
     if c.Account == "" {
-        return fmt.Errorf("Triton SD configuration requires an account")
+        return fmt.Errorf("triton SD configuration requires an account")
     }
     if c.DNSSuffix == "" {
-        return fmt.Errorf("Triton SD configuration requires a dns_suffix")
+        return fmt.Errorf("triton SD configuration requires a dns_suffix")
     }
     if c.Endpoint == "" {
-        return fmt.Errorf("Triton SD configuration requires an endpoint")
+        return fmt.Errorf("triton SD configuration requires an endpoint")
     }
     if c.RefreshInterval <= 0 {
-        return fmt.Errorf("Triton SD configuration requires RefreshInterval to be a positive integer")
+        return fmt.Errorf("triton SD configuration requires RefreshInterval to be a positive integer")
     }
     return nil
 }

@@ -43,18 +43,12 @@ var (
     addressLabel = model.MetaLabelPrefix + "consul_address"
     // nodeLabel is the name for the label containing a target's node name.
     nodeLabel = model.MetaLabelPrefix + "consul_node"
-    // metaDataLabel is the prefix for the labels mapping to a target's metadata.
-    metaDataLabel = model.MetaLabelPrefix + "consul_metadata_"
     // tagsLabel is the name of the label containing the tags assigned to the target.
     tagsLabel = model.MetaLabelPrefix + "consul_tags"
-    // serviceLabel is the name of the label containing the service name.
-    serviceLabel = model.MetaLabelPrefix + "consul_service"
     // serviceAddressLabel is the name of the label containing the (optional) service address.
     serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
     //servicePortLabel is the name of the label containing the service port.
     servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
-    // datacenterLabel is the name of the label containing the datacenter ID.
-    datacenterLabel = model.MetaLabelPrefix + "consul_dc"
     // serviceIDLabel is the name of the label containing the service ID.
     serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"
 )

@@ -89,12 +83,11 @@ type sdConfig struct {
 // Note: This is the struct with your implementation of the Discoverer interface (see Run function).
 // Discovery retrieves target information from a Consul server and updates them via watches.
 type discovery struct {
     address          string
     refreshInterval  int
-    clientDatacenter string
     tagSeparator     string
     logger           log.Logger
     oldSourceList    map[string]bool
 }
 
 func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*targetgroup.Group, error) {
@@ -175,7 +175,7 @@ func (q *query) Exec(ctx context.Context) *Result {
         span.SetTag(queryTag, q.stmt.String())
     }
 
-    res, err, warnings := q.ng.exec(ctx, q)
+    res, warnings, err := q.ng.exec(ctx, q)
     return &Result{Err: err, Value: res, Warnings: warnings}
 }
 

@@ -353,7 +353,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
 //
 // At this point per query only one EvalStmt is evaluated. Alert and record
 // statements are not handled by the Engine.
-func (ng *Engine) exec(ctx context.Context, q *query) (Value, error, storage.Warnings) {
+func (ng *Engine) exec(ctx context.Context, q *query) (Value, storage.Warnings, error) {
     ng.metrics.currentQueries.Inc()
     defer ng.metrics.currentQueries.Dec()
 

@@ -366,7 +366,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (Value, error, storage.Warnings) {
     queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime)
 
     if err := ng.gate.Start(ctx); err != nil {
-        return nil, contextErr(err, "query queue"), nil
+        return nil, nil, contextErr(err, "query queue")
     }
     defer ng.gate.Done()
 

@@ -382,14 +382,14 @@ func (ng *Engine) exec(ctx context.Context, q *query) (Value, error, storage.Warnings) {
 
     // The base context might already be canceled on the first iteration (e.g. during shutdown).
     if err := contextDone(ctx, env); err != nil {
-        return nil, err, nil
+        return nil, nil, err
     }
 
     switch s := q.Statement().(type) {
     case *EvalStmt:
         return ng.execEvalStmt(ctx, q, s)
     case testStmt:
-        return nil, s(ctx), nil
+        return nil, nil, s(ctx)
     }
 
     panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement()))

@@ -404,9 +404,9 @@ func durationMilliseconds(d time.Duration) int64 {
 }
 
 // execEvalStmt evaluates the expression of an evaluation statement for the given time range.
-func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
+func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, storage.Warnings, error) {
     prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
-    querier, err, warnings := ng.populateSeries(ctxPrepare, query.queryable, s)
+    querier, warnings, err := ng.populateSeries(ctxPrepare, query.queryable, s)
     prepareSpanTimer.Finish()
 
     // XXX(fabxc): the querier returned by populateSeries might be instantiated

@@ -417,7 +417,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
     }
 
     if err != nil {
-        return nil, err, warnings
+        return nil, warnings, err
     }
 
     evalSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)

@@ -435,7 +435,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
         }
         val, err := evaluator.Eval(s.Expr)
         if err != nil {
-            return nil, err, warnings
+            return nil, warnings, err
         }
 
         evalSpanTimer.Finish()

@@ -454,11 +454,11 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
             // timestamp as that is when we ran the evaluation.
             vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}}
         }
-        return vector, nil, warnings
+        return vector, warnings, nil
     case ValueTypeScalar:
-        return Scalar{V: mat[0].Points[0].V, T: start}, nil, warnings
+        return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil
     case ValueTypeMatrix:
-        return mat, nil, warnings
+        return mat, warnings, nil
     default:
         panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type()))
     }

@@ -477,7 +477,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
     }
     val, err := evaluator.Eval(s.Expr)
     if err != nil {
-        return nil, err, warnings
+        return nil, warnings, err
     }
     evalSpanTimer.Finish()
 

@@ -488,7 +488,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
     query.matrix = mat
 
     if err := contextDone(ctx, "expression evaluation"); err != nil {
-        return nil, err, warnings
+        return nil, warnings, err
     }
 
     // TODO(fabxc): order ensured by storage?

@@ -497,7 +497,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error, storage.Warnings) {
     sort.Sort(mat)
     sortSpanTimer.Finish()
 
-    return mat, nil, warnings
+    return mat, warnings, nil
 }
 
 // cumulativeSubqueryOffset returns the sum of range and offset of all subqueries in the path.

@@ -512,7 +512,7 @@ func (ng *Engine) cumulativeSubqueryOffset(path []Node) time.Duration {
     return subqOffset
 }
 
-func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error, storage.Warnings) {
+func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, storage.Warnings, error) {
     var maxOffset time.Duration
     Inspect(s.Expr, func(node Node, path []Node) error {
         subqOffset := ng.cumulativeSubqueryOffset(path)

@@ -539,7 +539,7 @@ func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error, storage.Warnings) {
 
     querier, err := q.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End))
     if err != nil {
-        return nil, err, nil
+        return nil, nil, err
     }
 
     var warnings storage.Warnings

@@ -592,7 +592,7 @@ func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error, storage.Warnings) {
         }
         return nil
     })
-    return querier, err, warnings
+    return querier, warnings, err
 }
 
 // extractFuncFromPath walks up the path and searches for the first instance of
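The engine.go hunks above all make the same change: every multi-value return is reordered so the error comes last, (Value, storage.Warnings, error) instead of (Value, error, storage.Warnings). That matches the Go convention staticcheck reports as ST1008 (a function's error value should be its last return value). A small illustrative sketch with invented names, not the Prometheus signatures:

package main

import (
    "errors"
    "fmt"
)

// Warnings mirrors the shape of storage.Warnings: non-fatal notes that can
// accompany a successful result.
type Warnings []error

// fetch follows the convention checked by ST1008: the error is the final
// return value, with any warnings before it.
func fetch(key string) (string, Warnings, error) {
    if key == "" {
        return "", nil, errors.New("empty key")
    }
    warns := Warnings{errors.New("served from a stale replica")}
    return "value-for-" + key, warns, nil
}

func main() {
    val, warns, err := fetch("up")
    fmt.Println(val, warns, err)
}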
@@ -1243,6 +1243,7 @@ func getPointSlice(sz int) []Point {
 }
 
 func putPointSlice(p []Point) {
+    //lint:ignore SA6002 relax staticcheck verification.
     pointPool.Put(p[:0])
 }
 
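SA6002 warns when a non-pointer value such as a slice is stored in a sync.Pool, because boxing it into an interface{} allocates on every Put. Rather than keep the file-wide entries that were dropped from STATICCHECK_IGNORE in the Makefile, the hunk above silences the check on the single offending line with a //lint:ignore directive. A self-contained sketch of the same pattern, using a hypothetical byte-slice pool rather than the Prometheus point pool:

package main

import (
    "fmt"
    "sync"
)

var bufPool = sync.Pool{
    New: func() interface{} { return make([]byte, 0, 1024) },
}

func release(b []byte) {
    // Putting a slice (a non-pointer value) into a sync.Pool is what SA6002
    // complains about; the directive tells staticcheck to skip this line.
    //lint:ignore SA6002 the slice header is cheap to copy here.
    bufPool.Put(b[:0])
}

func main() {
    b := bufPool.Get().([]byte)
    b = append(b, "hello"...)
    fmt.Println(string(b))
    release(b)
}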
@@ -1665,21 +1666,6 @@ func vectorElemBinop(op ItemType, lhs, rhs float64) (float64, bool) {
     panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op))
 }
 
-// intersection returns the metric of common label/value pairs of two input metrics.
-func intersection(ls1, ls2 labels.Labels) labels.Labels {
-    res := make(labels.Labels, 0, 5)
-
-    for _, l1 := range ls1 {
-        for _, l2 := range ls2 {
-            if l1.Name == l2.Name && l1.Value == l2.Value {
-                res = append(res, l1)
-                continue
-            }
-        }
-    }
-    return res
-}
-
 type groupedAggregation struct {
     labels labels.Labels
     value  float64
@@ -155,7 +155,7 @@ func (node *MatrixSelector) String() string {
 func (node *SubqueryExpr) String() string {
     step := ""
     if node.Step != 0 {
-        step = fmt.Sprintf("%s", model.Duration(node.Step))
+        step = model.Duration(node.Step).String()
     }
     return fmt.Sprintf("%s[%s:%s]", node.Expr.String(), model.Duration(node.Range), step)
 }
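This change drops a redundant fmt.Sprintf("%s", ...) in favor of calling the value's own String method, the simplification staticcheck suggests as S1025. Roughly, using time.Duration as a stand-in for model.Duration:

package main

import (
    "fmt"
    "time"
)

func main() {
    d := 5 * time.Minute

    // S1025: the Sprintf round trip is redundant...
    viaSprintf := fmt.Sprintf("%s", d)
    // ...because time.Duration already implements fmt.Stringer.
    direct := d.String()

    fmt.Println(viaSprintf == direct, direct) // true 5m0s
}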
@@ -311,10 +311,7 @@ func (ssi *storageSeriesIterator) At() (t int64, v float64) {
 
 func (ssi *storageSeriesIterator) Next() bool {
     ssi.curr++
-    if ssi.curr >= len(ssi.points) {
-        return false
-    }
-    return true
+    return ssi.curr < len(ssi.points)
 }
 
 func (ssi *storageSeriesIterator) Err() error {
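Collapsing the if / return true / return false sequence in Next into a single return of the comparison is the rewrite staticcheck proposes as S1008 (simplify returning boolean expressions). The same shape in isolation, with hypothetical names:

package main

import "fmt"

type iter struct {
    curr, length int
}

// Before (flagged by S1008):
//
//    if it.curr >= it.length {
//        return false
//    }
//    return true
//
// After: return the comparison directly.
func (it *iter) next() bool {
    it.curr++
    return it.curr < it.length
}

func main() {
    it := &iter{curr: -1, length: 2}
    for it.next() {
        fmt.Println("element", it.curr)
    }
}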
@@ -206,10 +206,6 @@ func (r *AlertingRule) Annotations() labels.Labels {
     return r.annotations
 }
 
-func (r *AlertingRule) equal(o *AlertingRule) bool {
-    return r.name == o.name && labels.Equal(r.labels, o.labels)
-}
-
 func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
     lb := labels.NewBuilder(r.labels)
 

@@ -927,6 +927,7 @@ type sample struct {
     v float64
 }
 
+//lint:ignore U1000 staticcheck falsely reports that samples is unused.
 type samples []sample
 
 func (s samples) Len() int { return len(s) }

@@ -302,13 +302,13 @@ func (c *concreteSeriesIterator) Err() error {
 func validateLabelsAndMetricName(ls labels.Labels) error {
     for _, l := range ls {
         if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
-            return fmt.Errorf("Invalid metric name: %v", l.Value)
+            return fmt.Errorf("invalid metric name: %v", l.Value)
         }
         if !model.LabelName(l.Name).IsValid() {
-            return fmt.Errorf("Invalid label name: %v", l.Name)
+            return fmt.Errorf("invalid label name: %v", l.Name)
         }
         if !model.LabelValue(l.Value).IsValid() {
-            return fmt.Errorf("Invalid label value: %v", l.Value)
+            return fmt.Errorf("invalid label value: %v", l.Value)
         }
     }
     return nil
@@ -401,10 +401,6 @@ func (t *QueueManager) newShards(numShards int) *shards {
     return s
 }
 
-func (s *shards) len() int {
-    return len(s.queues)
-}
-
 func (s *shards) start() {
     for i := 0; i < len(s.queues); i++ {
         go s.runShard(i)

@@ -426,7 +422,6 @@ func (s *shards) stop(deadline time.Duration) {
     // Force an unclean shutdown.
     s.cancel()
     <-s.done
-    return
 }
 
 func (s *shards) enqueue(sample *model.Sample) bool {

@@ -323,7 +323,7 @@ func TestShutdown(t *testing.T) {
 
     start := time.Now()
     m.Stop()
-    duration := time.Now().Sub(start)
+    duration := time.Since(start)
     if duration > deadline+(deadline/10) {
         t.Errorf("Took too long to shutdown: %s > %s", duration, deadline)
     }
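time.Now().Sub(start) and time.Since(start) are equivalent; staticcheck's S1012 check suggests the shorter form used in the test above. For example:

package main

import (
    "fmt"
    "time"
)

func main() {
    start := time.Now()
    time.Sleep(10 * time.Millisecond)

    // Equivalent, but S1012 prefers the dedicated helper:
    // elapsed := time.Now().Sub(start)
    elapsed := time.Since(start)

    fmt.Println("elapsed:", elapsed)
}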
@@ -118,7 +118,7 @@ func TestCompressionHandler_Gzip(t *testing.T) {
         t.Error("unexpected error while reading from response body")
     }
 
-    actual := string(buf.Bytes())
+    actual := buf.String()
     expected := "Hello World!"
     if expected != actual {
         t.Errorf("expected response with content %s, but got %s", expected, actual)

@@ -168,7 +168,7 @@ func TestCompressionHandler_Deflate(t *testing.T) {
         t.Error("unexpected error while reading from response body")
     }
 
-    actual := string(buf.Bytes())
+    actual := buf.String()
     expected := "Hello World!"
     if expected != actual {
         t.Errorf("expected response with content %s, but got %s", expected, actual)
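string(buf.Bytes()) and buf.String() yield the same result for a bytes.Buffer; staticcheck's S1030 check suggests calling String directly, as the two test hunks above now do. A minimal sketch:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    var buf bytes.Buffer
    buf.WriteString("Hello World!")

    // S1030: prefer buf.String() over string(buf.Bytes()).
    fmt.Println(buf.String())
}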
@@ -63,12 +63,11 @@ func (zl ZookeeperLogger) Printf(s string, i ...interface{}) {
 // A ZookeeperTreeCache keeps data from all children of a Zookeeper path
 // locally cached and updated according to received events.
 type ZookeeperTreeCache struct {
     conn   *zk.Conn
     prefix string
     events chan ZookeeperTreeCacheEvent
-    zkEvents chan zk.Event
     stop   chan struct{}
     head   *zookeeperTreeCacheNode
 
     logger log.Logger
 }

@@ -923,7 +923,7 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
 
 func (api *API) deleteSeries(r *http.Request) apiFuncResult {
     if !api.enableAdmin {
-        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("Admin APIs disabled")}, nil, nil}
+        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
     }
     db := api.db()
     if db == nil {

@@ -980,7 +980,7 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult {
 
 func (api *API) snapshot(r *http.Request) apiFuncResult {
     if !api.enableAdmin {
-        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("Admin APIs disabled")}, nil, nil}
+        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
     }
     var (
         skipHead bool

@@ -1019,7 +1019,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult {
 
 func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
     if !api.enableAdmin {
-        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("Admin APIs disabled")}, nil, nil}
+        return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
     }
     db := api.db()
     if db == nil {

@@ -36,7 +36,6 @@ import (
     "github.com/prometheus/tsdb"
     tsdbLabels "github.com/prometheus/tsdb/labels"
 
-    "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/pkg/timestamp"
     pb "github.com/prometheus/prometheus/prompb"
 )

@@ -114,16 +113,6 @@ var (
     maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999)
 )
 
-func labelsToProto(lset labels.Labels) pb.Labels {
-    r := pb.Labels{
-        Labels: make([]pb.Label, 0, len(lset)),
-    }
-    for _, l := range lset {
-        r.Labels = append(r.Labels, pb.Label{Name: l.Name, Value: l.Value})
-    }
-    return r
-}
-
 // AdminDisabled implements the administration interface that informs
 // that the API endpoints are disabled.
 type AdminDisabled struct {

@@ -90,7 +90,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
     s, wrns, err := q.Select(params, mset...)
     if wrns != nil {
         level.Debug(h.logger).Log("msg", "federation select returned warnings", "warnings", wrns)
-        federationErrors.Add(float64(len(wrns)))
+        federationWarnings.Add(float64(len(wrns)))
     }
     if err != nil {
         federationErrors.Inc()
web/web.go (24 changed lines)

@@ -124,20 +124,18 @@ type Handler struct {
 
     apiV1 *api_v1.API
 
     router      *route.Router
     quitCh      chan struct{}
     reloadCh    chan chan error
     options     *Options
     config      *config.Config
-    configString string
     versionInfo *PrometheusVersion
     birth       time.Time
     cwd         string
     flagsMap    map[string]string
 
-    externalLabels model.LabelSet
     mtx sync.RWMutex
     now func() model.Time
 
     ready uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
 }