From 5393ec22cb8dccae49232dca2dce4531cfc3d633 Mon Sep 17 00:00:00 2001
From: David Leadbeater
Date: Fri, 9 Oct 2020 12:53:20 +0100
Subject: [PATCH 1/9] promtool: Don't end alert tests early in some failure
 situations

If an alert test had a failing case, then any alert test interval
specified after that point would cause the run to exit early. This made
debugging some tests more difficult than necessary. Now the run only
exits early on evaluation failures.

Signed-off-by: David Leadbeater
---
 cmd/promtool/unittest.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index cbdb8931ef..ed2aad260e 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -234,6 +234,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 	var errs []error
 	for ts := mint; ts.Before(maxt); ts = ts.Add(evalInterval) {
 		// Collects the alerts asked for unit testing.
+		var evalErrs []error
 		suite.WithSamplesTill(ts, func(err error) {
 			if err != nil {
 				errs = append(errs, err)
@@ -243,13 +244,16 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 				g.Eval(suite.Context(), ts)
 				for _, r := range g.Rules() {
 					if r.LastError() != nil {
-						errs = append(errs, errors.Errorf("    rule: %s, time: %s, err: %v",
+						evalErrs = append(evalErrs, errors.Errorf("    rule: %s, time: %s, err: %v",
 							r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
 					}
 				}
 			}
 		})
-		if len(errs) > 0 {
+		errs = append(errs, evalErrs...)
+		// Only end testing at this point if errors occurred evaluating above,
+		// rather than any test failures already collected in errs.
+		if len(evalErrs) > 0 {
 			return errs
 		}

From 8787f0aed79c5feffb0b18b6051e8ddd91d7bbce Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Thu, 18 Feb 2021 23:14:49 +0100
Subject: [PATCH 2/9] Update common to support credentials type

Most of the backwards-compat testing is done in common.
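As a rough sketch of the migration this enables (both forms are taken
directly from the config/testdata diffs below), a scrape_config that
previously used the bearer token options:

    bearer_token_file: valid_token_file

can now be written with the generic `authorization` block, whose `type`
defaults to Bearer:

    authorization:
      credentials_file: valid_token_file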
Signed-off-by: Julien Pivotto --- config/config.go | 2 +- config/config_test.go | 28 ++- config/testdata/conf.good.yml | 12 +- ...kubernetes_authorization_basicauth.bad.yml | 13 ++ ...tes_http_config_without_api_server.bad.yml | 3 +- .../marathon_authtoken_authorization.bad.yml | 10 + config/testdata/roundtrip.good.yml | 3 +- discovery/digitalocean/digitalocean.go | 2 +- discovery/dockerswarm/dockerswarm.go | 2 +- discovery/hetzner/hetzner.go | 2 +- discovery/marathon/marathon.go | 3 + docs/configuration/configuration.md | 176 +++++++++++------- .../examples/prometheus-digitalocean.yml | 3 +- documentation/examples/prometheus-hetzner.yml | 6 +- .../examples/prometheus-kubernetes.yml | 15 +- go.mod | 2 +- go.sum | 2 + 17 files changed, 191 insertions(+), 93 deletions(-) create mode 100644 config/testdata/kubernetes_authorization_basicauth.bad.yml create mode 100644 config/testdata/marathon_authtoken_authorization.bad.yml diff --git a/config/config.go b/config/config.go index 8cb0e5e44e..118b13d483 100644 --- a/config/config.go +++ b/config/config.go @@ -618,7 +618,7 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err } for header := range c.Headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, bearer_token, or bearer_token_file parameter") + return errors.New("authorization header must be changed via the basic_auth or authorization parameter") } if _, ok := unchangeableHeaders[strings.ToLower(header)]; ok { return errors.Errorf("%s is an unchangeable header", header) diff --git a/config/config_test.go b/config/config_test.go index bca8c98a34..bccae5cea2 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -141,7 +141,10 @@ var expectedConf = &Config{ Scheme: DefaultScrapeConfig.Scheme, HTTPClientConfig: config.HTTPClientConfig{ - BearerTokenFile: filepath.FromSlash("testdata/valid_token_file"), + Authorization: &config.Authorization{ + Type: "Bearer", + CredentialsFile: filepath.FromSlash("testdata/valid_token_file"), + }, }, ServiceDiscoveryConfigs: discovery.Configs{ @@ -344,7 +347,10 @@ var expectedConf = &Config{ KeyFile: filepath.FromSlash("testdata/valid_key_file"), }, - BearerToken: "mysecret", + Authorization: &config.Authorization{ + Type: "Bearer", + Credentials: "mysecret", + }, }, }, { @@ -603,7 +609,10 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &digitalocean.SDConfig{ HTTPClientConfig: config.HTTPClientConfig{ - BearerToken: "abcdef", + Authorization: &config.Authorization{ + Type: "Bearer", + Credentials: "abcdef", + }, }, Port: 80, RefreshInterval: model.Duration(60 * time.Second), @@ -665,7 +674,10 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &hetzner.SDConfig{ HTTPClientConfig: config.HTTPClientConfig{ - BearerToken: "abcdef", + Authorization: &config.Authorization{ + Type: "Bearer", + Credentials: "abcdef", + }, }, Port: 80, RefreshInterval: model.Duration(60 * time.Second), @@ -919,6 +931,9 @@ var expectedErrors = []struct { }, { filename: "kubernetes_bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + }, { + filename: "kubernetes_authorization_basicauth.bad.yml", + errMsg: "at most one of basic_auth & authorization must be configured", }, { filename: "marathon_no_servers.bad.yml", errMsg: "marathon_sd: must contain at least one Marathon server", @@ -931,6 +946,9 @@ var expectedErrors = []struct { }, { filename: 
"marathon_authtoken_bearertoken.bad.yml", errMsg: "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured", + }, { + filename: "marathon_authtoken_authorization.bad.yml", + errMsg: "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured", }, { filename: "openstack_role.bad.yml", errMsg: "unknown OpenStack SD role", @@ -957,7 +975,7 @@ var expectedErrors = []struct { errMsg: `x-prometheus-remote-write-version is an unchangeable header`, }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, bearer_token, or bearer_token_file parameter`, + errMsg: `authorization header must be changed via the basic_auth or authorization parameter`, }, { filename: "remote_write_url_missing.bad.yml", errMsg: `url for remote_write is empty`, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 6b71ca0dab..33548f477f 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -79,7 +79,8 @@ scrape_configs: replacement: static target_label: abc - bearer_token_file: valid_token_file + authorization: + credentials_file: valid_token_file - job_name: service-x @@ -158,7 +159,8 @@ scrape_configs: cert_file: valid_cert_file key_file: valid_key_file - bearer_token: mysecret + authorization: + credentials: mysecret - job_name: service-kubernetes @@ -263,7 +265,8 @@ scrape_configs: - job_name: digitalocean-droplets digitalocean_sd_configs: - - bearer_token: abcdef + - authorization: + credentials: abcdef - job_name: dockerswarm dockerswarm_sd_configs: @@ -284,7 +287,8 @@ scrape_configs: - job_name: hetzner hetzner_sd_configs: - role: hcloud - bearer_token: abcdef + authorization: + credentials: abcdef - role: robot basic_auth: username: abcdef diff --git a/config/testdata/kubernetes_authorization_basicauth.bad.yml b/config/testdata/kubernetes_authorization_basicauth.bad.yml new file mode 100644 index 0000000000..cfa39bd6d8 --- /dev/null +++ b/config/testdata/kubernetes_authorization_basicauth.bad.yml @@ -0,0 +1,13 @@ +scrape_configs: + - job_name: prometheus + + kubernetes_sd_configs: + - role: pod + api_server: 'https://localhost:1234' + + authorization: + credentials: 1234 + basic_auth: + username: user + password: password + diff --git a/config/testdata/kubernetes_http_config_without_api_server.bad.yml b/config/testdata/kubernetes_http_config_without_api_server.bad.yml index db442c3bd1..30cc3c3d7d 100644 --- a/config/testdata/kubernetes_http_config_without_api_server.bad.yml +++ b/config/testdata/kubernetes_http_config_without_api_server.bad.yml @@ -2,4 +2,5 @@ scrape_configs: - job_name: prometheus kubernetes_sd_configs: - role: pod - bearer_token: 1234 + authorization: + credentials: 1234 diff --git a/config/testdata/marathon_authtoken_authorization.bad.yml b/config/testdata/marathon_authtoken_authorization.bad.yml new file mode 100644 index 0000000000..d3112b12c3 --- /dev/null +++ b/config/testdata/marathon_authtoken_authorization.bad.yml @@ -0,0 +1,10 @@ +scrape_configs: + - job_name: prometheus + + marathon_sd_configs: + - servers: + - 'https://localhost:1234' + + auth_token: 1234 + authorization: + credentials: 4567 diff --git a/config/testdata/roundtrip.good.yml b/config/testdata/roundtrip.good.yml index 4aa3c432ef..ab8ed81407 100644 --- a/config/testdata/roundtrip.good.yml +++ b/config/testdata/roundtrip.good.yml @@ -53,7 +53,8 @@ scrape_configs: key_file: valid_key_file digitalocean_sd_configs: - - 
bearer_token: + - authorization: + credentials: dockerswarm_sd_configs: - host: http://127.0.0.1:2375 diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index a824668312..e67afe8cb5 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -89,7 +89,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return err } - return nil + return c.HTTPClientConfig.Validate() } // Discovery periodically performs DigitalOcean requests. It implements diff --git a/discovery/dockerswarm/dockerswarm.go b/discovery/dockerswarm/dockerswarm.go index 2e0b477cd7..92539bf974 100644 --- a/discovery/dockerswarm/dockerswarm.go +++ b/discovery/dockerswarm/dockerswarm.go @@ -102,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { default: return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role) } - return nil + return c.HTTPClientConfig.Validate() } // Discovery periodically performs Docker Swarm requests. It implements diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 9d1e3b264d..22f88cc2e5 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -110,7 +110,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Role == "" { return errors.New("role missing (one of: robot, hcloud)") } - return nil + return c.HTTPClientConfig.Validate() } // Discovery periodically performs Hetzner requests. It implements diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index efd4769e3a..9f046d1969 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -111,6 +111,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured") } + if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) { + return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured") + } return c.HTTPClientConfig.Validate() } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 0aaf605b24..cd657035b7 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -169,12 +169,16 @@ basic_auth: [ password_file: ] # Sets the `Authorization` header on every scrape request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file`. -[ bearer_token: ] - -# Sets the `Authorization` header on every scrape request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token`. -[ bearer_token_file: ] +# the configured credentials. +authorization: + # Sets the authentication type of the request. + [ type: | default: Bearer ] + # Sets the credentials of the request. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials of the request with the credentials read from the + # configured file. It is mutually exclusive with `credentials`. + [ credentials_file: ] # Configures the scrape request's TLS settings. 
tls_config: @@ -436,7 +440,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ ```yaml # Authentication information used to authenticate to the API server. -# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are +# Note that `basic_auth` and `authorization` options are # mutually exclusive. # password and password_file are mutually exclusive. @@ -446,11 +450,16 @@ basic_auth: [ password: ] [ password_file: ] -# Optional bearer token authentication information. -[ bearer_token: ] - -# Optional bearer token file authentication information. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Optional proxy URL. [ proxy_url: ] @@ -592,7 +601,7 @@ role: [ refresh_interval: | default = 60s ] # Authentication information used to authenticate to the Docker daemon. -# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are +# Note that `basic_auth` and `authorization` options are # mutually exclusive. # password and password_file are mutually exclusive. @@ -602,11 +611,16 @@ basic_auth: [ password: ] [ password_file: ] -# Optional bearer token authentication information. -[ bearer_token: ] - -# Optional bearer token file authentication information. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful @@ -989,7 +1003,7 @@ The labels below are only available for targets with `role` set to `robot`: role: # Authentication information used to authenticate to the API server. -# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are +# Note that `basic_auth` and `authorization` options are # mutually exclusive. # password and password_file are mutually exclusive. @@ -1000,12 +1014,17 @@ basic_auth: [ password: ] [ password_file: ] -# Optional bearer token authentication information, required when role is hcloud -# Role robot does not support bearer token authentication. -[ bearer_token: ] - -# Optional bearer token file authentication information. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. required when role is +# hcloud. Role robot does not support bearer token authentication. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Optional proxy URL. [ proxy_url: ] @@ -1154,7 +1173,7 @@ See below for the configuration options for Kubernetes discovery: role: # Optional authentication information used to authenticate to the API server. 
-# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are +# Note that `basic_auth` and `authorization` options are # mutually exclusive. # password and password_file are mutually exclusive. @@ -1164,11 +1183,16 @@ basic_auth: [ password: ] [ password_file: ] -# Optional bearer token authentication information. -[ bearer_token: ] - -# Optional bearer token file authentication information. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Optional proxy URL. [ proxy_url: ] @@ -1253,15 +1277,19 @@ basic_auth: [ password: ] [ password_file: ] -# Sets the `Authorization` header on every request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file` and other authentication mechanisms. -# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token` instead. -[ bearer_token: ] - -# Sets the `Authorization` header on every request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token` and other authentication mechanisms. -# NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token_file` instead. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +# NOTE: The current version of DC/OS marathon (v1.11.0) does not support +# standard `Authentication` header, use `auth_token` or `auth_token_file` +# instead. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # TLS configuration for connecting to marathon servers tls_config: @@ -1447,13 +1475,16 @@ basic_auth: [ password: ] [ password_file: ] -# Sets the `Authorization` header on every request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file`. -[ bearer_token: ] - -# Sets the `Authorization` header on every request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token`. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Configures the scrape request's TLS settings. tls_config: @@ -1616,13 +1647,16 @@ basic_auth: [ password: ] [ password_file: ] -# Sets the `Authorization` header on every request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file`. -[ bearer_token: ] - -# Sets the `Authorization` header on every request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token`. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. 
+authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Configures the scrape request's TLS settings. tls_config: @@ -1742,13 +1776,16 @@ basic_auth: [ password: ] [ password_file: ] -# Sets the `Authorization` header on every remote write request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file`. -[ bearer_token: ] - -# Sets the `Authorization` header on every remote write request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token`. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Configures the remote write request's TLS settings. tls_config: @@ -1825,13 +1862,16 @@ basic_auth: [ password: ] [ password_file: ] -# Sets the `Authorization` header on every remote read request with -# the configured bearer token. It is mutually exclusive with `bearer_token_file`. -[ bearer_token: ] - -# Sets the `Authorization` header on every remote read request with the bearer token -# read from the configured file. It is mutually exclusive with `bearer_token`. -[ bearer_token_file: ] +# Optional the `Authorization` header configuration. +authorization: + # Sets the authentication type. + [ type: | default: Bearer ] + # Sets the credentials. It is mutually exclusive with + # `credentials_file`. + [ credentials: ] + # Sets the credentials with the credentials read from the configured file. + # It is mutually exclusive with `credentials`. + [ credentials_file: ] # Configures the remote read request's TLS settings. tls_config: diff --git a/documentation/examples/prometheus-digitalocean.yml b/documentation/examples/prometheus-digitalocean.yml index bb8ee1744c..b1ed4ed730 100644 --- a/documentation/examples/prometheus-digitalocean.yml +++ b/documentation/examples/prometheus-digitalocean.yml @@ -12,7 +12,8 @@ scrape_configs: - job_name: 'node' digitalocean_sd_configs: - - bearer_token: "" + - authorization: + credentials: "" relabel_configs: # Only scrape targets that have a tag 'monitoring'. - source_labels: [__meta_digitalocean_tags] diff --git a/documentation/examples/prometheus-hetzner.yml b/documentation/examples/prometheus-hetzner.yml index 158327bf11..4632f66927 100644 --- a/documentation/examples/prometheus-hetzner.yml +++ b/documentation/examples/prometheus-hetzner.yml @@ -12,7 +12,8 @@ scrape_configs: - job_name: 'node' hetzner_sd_configs: - - bearer_token: "" + - authorization: + credentials: "" platform: "hcloud" relabel_configs: # Use the public IPv4 and port 9100 to scrape the target. @@ -24,7 +25,8 @@ scrape_configs: - job_name: 'node_private' hetzner_sd_configs: - - bearer_token: "" + - authorization: + credentials: "" platform: "hcloud" relabel_configs: # Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target. 
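The DigitalOcean and Hetzner examples above only swap
`bearer_token: "<token>"` for an `authorization` block, relying on the
documented default of `type: Bearer`. The same block also covers other
single-token schemes; a minimal sketch, assuming a hypothetical backend
that expects an `Authorization: ApiKey <key>` header:

    authorization:
      type: ApiKey
      credentials: "<api key>"

As the documentation diff above notes, `credentials` and
`credentials_file` remain mutually exclusive.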
diff --git a/documentation/examples/prometheus-kubernetes.yml b/documentation/examples/prometheus-kubernetes.yml index dc702870bb..9f5fe05281 100644 --- a/documentation/examples/prometheus-kubernetes.yml +++ b/documentation/examples/prometheus-kubernetes.yml @@ -25,7 +25,7 @@ scrape_configs: # `http`. scheme: https - # This TLS & bearer token file config is used to connect to the actual scrape + # This TLS & authorization config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside @@ -40,7 +40,8 @@ scrape_configs: # disable certificate verification by uncommenting the line below. # # insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token # Keep only the default/kubernetes service endpoints for the https port. This # will add targets for each API server which Kubernetes adds an endpoint to @@ -62,7 +63,7 @@ scrape_configs: # `http`. scheme: https - # This TLS & bearer token file config is used to connect to the actual scrape + # This TLS & authorization config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside @@ -77,7 +78,8 @@ scrape_configs: # disable certificate verification by uncommenting the line below. # # insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node @@ -112,7 +114,7 @@ scrape_configs: # are used. metrics_path: /metrics/cadvisor - # This TLS & bearer token file config is used to connect to the actual scrape + # This TLS & authorization config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside @@ -127,7 +129,8 @@ scrape_configs: # disable certificate verification by uncommenting the line below. 
# # insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node diff --git a/go.mod b/go.mod index 51cb409acc..f322974315 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/prometheus/alertmanager v0.21.0 github.com/prometheus/client_golang v1.9.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.15.0 + github.com/prometheus/common v0.17.0 github.com/prometheus/exporter-toolkit v0.5.1 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 diff --git a/go.sum b/go.sum index a3a989b87f..e640af6988 100644 --- a/go.sum +++ b/go.sum @@ -715,6 +715,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA= +github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= From 889dd0bbd3217c0ebdb53ff3bab13dc5e4763ef6 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 18 Feb 2021 23:56:27 +0100 Subject: [PATCH 3/9] Fix DB tests in the default branch The main branch tests are not passing due to the fact that #8489 was not rebased on top of #8007. Signed-off-by: Julien Pivotto --- tsdb/db_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 4849153de2..502f2c5564 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1115,7 +1115,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) { smpls := make([]float64, numSamples) for i := int64(0); i < numSamples; i++ { smpls[i] = rand.Float64() - app.Add(labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i]) + app.Append(0, labels.Labels{{Name: "a", Value: "b"}}, i, smpls[i]) } require.NoError(t, app.Commit()) From 42a0e0acad173f8aac8223f68b3ddc401919cef3 Mon Sep 17 00:00:00 2001 From: Danny Kopping Date: Fri, 19 Feb 2021 08:38:05 +0200 Subject: [PATCH 4/9] Prevent lexer from seeking to next rune after lexing escape sequence. (#8517) * Prevent lexer from seeking to next rune after lexing escape sequence. Signed-off-by: Danny Kopping --- promql/parser/lex.go | 6 +++++- promql/parser/parse_test.go | 10 ++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/promql/parser/lex.go b/promql/parser/lex.go index ece762dc4d..313bd8f88b 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -583,8 +583,12 @@ func lexEscape(l *Lexer) stateFn { return lexString } x = x*base + d - ch = l.next() n-- + + // Don't seek after last rune. 
+ if n > 0 { + ch = l.next() + } } if x > max || 0xD800 <= x && x < 0xE000 { diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 10096188af..d4203c16e5 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -3274,6 +3274,16 @@ var testSeries = []struct { input: `my_metric{a="b"} 1 2 3 `, expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", "b"), expectedValues: newSeq(1, 2, 3), + }, { + // Handle escaped unicode characters as whole label values. + input: `my_metric{a="\u70ac"} 1 2 3`, + expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", `炬`), + expectedValues: newSeq(1, 2, 3), + }, { + // Handle escaped unicode characters as partial label values. + input: `my_metric{a="\u70ac = torch"} 1 2 3`, + expectedMetric: labels.FromStrings(labels.MetricName, "my_metric", "a", `炬 = torch`), + expectedValues: newSeq(1, 2, 3), }, { input: `my_metric{a="b"} -3-3 -3`, fail: true, From d35cf369f213b893d88c52482978b6849891eb26 Mon Sep 17 00:00:00 2001 From: Lucas Hild Date: Fri, 19 Feb 2021 23:42:20 +0100 Subject: [PATCH 5/9] Add expression explorer (Closes #8211) (#8404) * Add expression explorer Signed-off-by: Lucas Hild * Add final new line to all files Signed-off-by: Lucas Hild * Rename expression to metric Signed-off-by: Lucas Hild * Pass dedicated metrics array to metrics explorer Signed-off-by: Lucas Hild * Fix styling of button Signed-off-by: Lucas Hild * Use append instead of prepend Signed-off-by: Lucas Hild * Update max width of modal Signed-off-by: Lucas Hild * Fix code style Signed-off-by: Lucas Hild * Fix inconsistent variable naming Signed-off-by: Lucas Hild * Fix modal title Signed-off-by: Lucas Hild * Fix tests Signed-off-by: Lucas Hild * Prevent request from being cached Signed-off-by: Lucas Hild * Remove timestamp from request Signed-off-by: Lucas Hild * Update button selector in test Signed-off-by: Lucas Hild * Refactor passing down metric names and query history Signed-off-by: Lucas Hild * Fix code style Signed-off-by: Lucas Hild --- .../src/pages/graph/ExpressionInput.test.tsx | 45 +++-- .../src/pages/graph/ExpressionInput.tsx | 169 ++++++++++++------ .../src/pages/graph/MetricsExplorer.css | 14 ++ .../src/pages/graph/MetricsExplorer.tsx | 38 ++++ .../react-app/src/pages/graph/Panel.test.tsx | 14 +- web/ui/react-app/src/pages/graph/Panel.tsx | 6 +- 6 files changed, 190 insertions(+), 96 deletions(-) create mode 100644 web/ui/react-app/src/pages/graph/MetricsExplorer.css create mode 100644 web/ui/react-app/src/pages/graph/MetricsExplorer.tsx diff --git a/web/ui/react-app/src/pages/graph/ExpressionInput.test.tsx b/web/ui/react-app/src/pages/graph/ExpressionInput.test.tsx index d532101a35..86970f59e6 100644 --- a/web/ui/react-app/src/pages/graph/ExpressionInput.test.tsx +++ b/web/ui/react-app/src/pages/graph/ExpressionInput.test.tsx @@ -19,10 +19,8 @@ describe('ExpressionInput', () => { const metricNames = ['instance:node_cpu_utilisation:rate1m', 'node_cpu_guest_seconds_total', 'node_cpu_seconds_total']; const expressionInputProps = { value: 'node_cpu', - autocompleteSections: { - 'Query History': [], - 'Metric Names': metricNames, - }, + queryHistory: [], + metricNames, executeQuery: (): void => { // Do nothing. 
}, @@ -133,14 +131,16 @@ describe('ExpressionInput', () => { describe('handleKeyPress', () => { it('should call executeQuery on Enter key pressed', () => { const spyExecuteQuery = jest.fn(); - const input = mount(); + const props = { ...expressionInputProps, executeQuery: spyExecuteQuery }; + const input = mount(); const instance: any = input.instance(); instance.handleKeyPress({ preventDefault: jest.fn, key: 'Enter' }); expect(spyExecuteQuery).toHaveBeenCalled(); }); it('should NOT call executeQuery on Enter + Shift', () => { const spyExecuteQuery = jest.fn(); - const input = mount(); + const props = { ...expressionInputProps, executeQuery: spyExecuteQuery }; + const input = mount(); const instance: any = input.instance(); instance.handleKeyPress({ preventDefault: jest.fn, key: 'Enter', shiftKey: true }); expect(spyExecuteQuery).not.toHaveBeenCalled(); @@ -159,8 +159,13 @@ describe('ExpressionInput', () => { }); describe('createAutocompleteSection', () => { + const props = { + ...expressionInputProps, + metricNames: ['foo', 'bar', 'baz'], + }; + it('should close menu if no matches found', () => { - const input = mount(); + const input = mount(); const instance: any = input.instance(); const spyCloseMenu = jest.fn(); instance.createAutocompleteSection({ inputValue: 'qqqqqq', closeMenu: spyCloseMenu }); @@ -168,34 +173,22 @@ describe('ExpressionInput', () => { expect(spyCloseMenu).toHaveBeenCalled(); }); }); - it('should not render lsit if inputValue not exist', () => { - const input = mount(); + it('should not render list if inputValue not exist', () => { + const input = mount(); const instance: any = input.instance(); const spyCloseMenu = jest.fn(); instance.createAutocompleteSection({ closeMenu: spyCloseMenu }); setTimeout(() => expect(spyCloseMenu).toHaveBeenCalled()); }); it('should not render list if enableAutocomplete is false', () => { - const input = mount( - - ); + const input = mount(); const instance: any = input.instance(); const spyCloseMenu = jest.fn(); instance.createAutocompleteSection({ closeMenu: spyCloseMenu }); setTimeout(() => expect(spyCloseMenu).toHaveBeenCalled()); }); it('should render autosuggest-dropdown', () => { - const input = mount( - - ); + const input = mount(); const instance: any = input.instance(); const spyGetMenuProps = jest.fn(); const sections = instance.createAutocompleteSection({ @@ -264,8 +257,10 @@ describe('ExpressionInput', () => { it('renders an execute Button', () => { const addon = expressionInput.find(InputGroupAddon).filterWhere(addon => addon.prop('addonType') === 'append'); - const button = addon.find(Button); - expect(button.prop('className')).toEqual('execute-btn'); + const button = addon + .find(Button) + .find('.execute-btn') + .first(); expect(button.prop('color')).toEqual('primary'); expect(button.text()).toEqual('Execute'); }); diff --git a/web/ui/react-app/src/pages/graph/ExpressionInput.tsx b/web/ui/react-app/src/pages/graph/ExpressionInput.tsx index 6bce6c2d21..ae675bc5f8 100644 --- a/web/ui/react-app/src/pages/graph/ExpressionInput.tsx +++ b/web/ui/react-app/src/pages/graph/ExpressionInput.tsx @@ -6,12 +6,14 @@ import fuzzy from 'fuzzy'; import sanitizeHTML from 'sanitize-html'; import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; -import { faSearch, faSpinner } from '@fortawesome/free-solid-svg-icons'; +import { faSearch, faSpinner, faGlobeEurope } from '@fortawesome/free-solid-svg-icons'; +import MetricsExplorer from './MetricsExplorer'; interface ExpressionInputProps { value: string; onExpressionChange: (expr: 
string) => void; - autocompleteSections: { [key: string]: string[] }; + queryHistory: string[]; + metricNames: string[]; executeQuery: () => void; loading: boolean; enableAutocomplete: boolean; @@ -19,6 +21,7 @@ interface ExpressionInputProps { interface ExpressionInputState { height: number | string; + showMetricsExplorer: boolean; } class ExpressionInput extends Component { @@ -28,6 +31,7 @@ class ExpressionInput extends Component) => { const { inputValue = '', closeMenu, highlightedIndex } = downshift; - const { autocompleteSections } = this.props; + const autocompleteSections = { + 'Query History': this.props.queryHistory, + 'Metric Names': this.props.metricNames, + }; let index = 0; const sections = inputValue!.length && this.props.enableAutocomplete @@ -125,67 +132,111 @@ class ExpressionInput extends Component { + this.setState({ + showMetricsExplorer: true, + }); + }; + + updateShowMetricsExplorer = (show: boolean) => { + this.setState({ + showMetricsExplorer: show, + }); + }; + + insertAtCursor = (value: string) => { + if (!this.exprInputRef.current) return; + + const startPosition = this.exprInputRef.current.selectionStart; + const endPosition = this.exprInputRef.current.selectionEnd; + + const previousValue = this.exprInputRef.current.value; + let newValue: string; + if (startPosition && endPosition) { + newValue = + previousValue.substring(0, startPosition) + value + previousValue.substring(endPosition, previousValue.length); + } else { + newValue = previousValue + value; + } + + this.setValue(newValue); + }; + render() { const { executeQuery, value } = this.props; const { height } = this.state; return ( - - {downshift => ( -
- - - - {this.props.loading ? : } - - - { - switch (event.key) { - case 'Home': - case 'End': - // We want to be able to jump to the beginning/end of the input field. - // By default, Downshift otherwise jumps to the first/last suggestion item instead. - (event.nativeEvent as any).preventDownshiftDefault = true; - break; - case 'ArrowUp': - case 'ArrowDown': - if (!downshift.isOpen) { + <> + + {downshift => ( +
+ + + + {this.props.loading ? : } + + + { + switch (event.key) { + case 'Home': + case 'End': + // We want to be able to jump to the beginning/end of the input field. + // By default, Downshift otherwise jumps to the first/last suggestion item instead. (event.nativeEvent as any).preventDownshiftDefault = true; - } - break; - case 'Enter': - downshift.closeMenu(); - break; - case 'Escape': - if (!downshift.isOpen) { - this.exprInputRef.current!.blur(); - } - break; - default: - } - }, - } as any)} - value={value} - /> - - - - - {downshift.isOpen && this.createAutocompleteSection(downshift)} -
- )} -
+ break; + case 'ArrowUp': + case 'ArrowDown': + if (!downshift.isOpen) { + (event.nativeEvent as any).preventDownshiftDefault = true; + } + break; + case 'Enter': + downshift.closeMenu(); + break; + case 'Escape': + if (!downshift.isOpen) { + this.exprInputRef.current!.blur(); + } + break; + default: + } + }, + } as any)} + value={value} + /> + + + + + + +
+ {downshift.isOpen && this.createAutocompleteSection(downshift)} +
+ )} +
+ + + ); } } diff --git a/web/ui/react-app/src/pages/graph/MetricsExplorer.css b/web/ui/react-app/src/pages/graph/MetricsExplorer.css new file mode 100644 index 0000000000..44fc872a8c --- /dev/null +++ b/web/ui/react-app/src/pages/graph/MetricsExplorer.css @@ -0,0 +1,14 @@ +.metrics-explorer.modal-dialog { + max-width: 750px; + overflow-wrap: break-word; +} + +.metrics-explorer .metric { + cursor: pointer; + margin: 0; + padding: 5px; +} + +.metrics-explorer .metric:hover { + background: #efefef; +} diff --git a/web/ui/react-app/src/pages/graph/MetricsExplorer.tsx b/web/ui/react-app/src/pages/graph/MetricsExplorer.tsx new file mode 100644 index 0000000000..325cbfe66a --- /dev/null +++ b/web/ui/react-app/src/pages/graph/MetricsExplorer.tsx @@ -0,0 +1,38 @@ +import React, { Component } from 'react'; +import { Modal, ModalBody, ModalHeader } from 'reactstrap'; +import './MetricsExplorer.css'; + +interface Props { + show: boolean; + updateShow(show: boolean): void; + metrics: string[]; + insertAtCursor(value: string): void; +} + +class MetricsExplorer extends Component { + handleMetricClick = (query: string) => { + this.props.insertAtCursor(query); + this.props.updateShow(false); + }; + + toggle = () => { + this.props.updateShow(!this.props.show); + }; + + render() { + return ( + + Metrics Explorer + + {this.props.metrics.map(metric => ( +

+ {metric} +

+ ))} +
+
+ ); + } +} + +export default MetricsExplorer; diff --git a/web/ui/react-app/src/pages/graph/Panel.test.tsx b/web/ui/react-app/src/pages/graph/Panel.test.tsx index 0849e456f7..ed686f85f5 100644 --- a/web/ui/react-app/src/pages/graph/Panel.test.tsx +++ b/web/ui/react-app/src/pages/graph/Panel.test.tsx @@ -41,14 +41,12 @@ describe('Panel', () => { it('renders an ExpressionInput', () => { const input = panel.find(ExpressionInput); expect(input.prop('value')).toEqual('prometheus_engine'); - expect(input.prop('autocompleteSections')).toEqual({ - 'Metric Names': [ - 'prometheus_engine_queries', - 'prometheus_engine_queries_concurrent_max', - 'prometheus_engine_query_duration_seconds', - ], - 'Query History': [], - }); + expect(input.prop('metricNames')).toEqual([ + 'prometheus_engine_queries', + 'prometheus_engine_queries_concurrent_max', + 'prometheus_engine_query_duration_seconds', + ]); + expect(input.prop('queryHistory')).toEqual([]); }); it('renders NavLinks', () => { diff --git a/web/ui/react-app/src/pages/graph/Panel.tsx b/web/ui/react-app/src/pages/graph/Panel.tsx index 1ac0571157..7920072631 100644 --- a/web/ui/react-app/src/pages/graph/Panel.tsx +++ b/web/ui/react-app/src/pages/graph/Panel.tsx @@ -238,10 +238,8 @@ class Panel extends Component { executeQuery={this.executeQuery} loading={this.state.loading} enableAutocomplete={this.props.enableAutocomplete} - autocompleteSections={{ - 'Query History': pastQueries, - 'Metric Names': metricNames, - }} + queryHistory={pastQueries} + metricNames={metricNames} /> From aff3c702abbf13010ce4bd5688938b2ae5d14914 Mon Sep 17 00:00:00 2001 From: pschou Date: Sat, 20 Feb 2021 10:34:52 -0500 Subject: [PATCH 6/9] promql: Add sgn, clamp and last_over_time functions (#8457) * Add sgn, clamp and last_over_time functions Signed-off-by: schou --- docs/querying/functions.md | 14 +++++++++++ promql/engine.go | 13 ++++++---- promql/functions.go | 43 ++++++++++++++++++++++++++++++++- promql/parser/functions.go | 15 ++++++++++++ promql/testdata/functions.test | 44 +++++++++++++++++++++++++++++++++- 5 files changed, 123 insertions(+), 6 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 2adaecf00b..16a440165f 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -73,6 +73,15 @@ For each input time series, `changes(v range-vector)` returns the number of times its value has changed within the provided time range as an instant vector. +## `clamp()` + +`clamp(v instant-vector, min scalar, max scalar)` +clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`. + +Special cases: +- Return an empty vector if `min > max` +- Return `NaN` if `min` or `max` is `NaN` + ## `clamp_max()` `clamp_max(v instant-vector, max scalar)` clamps the sample values of all @@ -370,6 +379,10 @@ Given a single-element input vector, `scalar(v instant-vector)` returns the sample value of that single element as a scalar. If the input vector does not have exactly one element, `scalar` will return `NaN`. +## `sgn()` + +`sgn(v instant-vector)` returns a vector with all sample values converted to their sign, defined as this: 1 if v is positive, -1 if v is negative and 0 if v is equal to zero. + ## `sort()` `sort(v instant-vector)` returns vector elements sorted by their sample values, @@ -418,6 +431,7 @@ over time and return an instant vector with per-series aggregation results: * `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval. 
* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval. * `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval. +* `last_over_time(range-vector)`: the most recent point value in specified interval. Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. diff --git a/promql/engine.go b/promql/engine.go index 2ed1014419..0923bedbc5 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1216,11 +1216,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { ev.currentSamples -= len(points) points = points[:0] it.Reset(s.Iterator()) + metric := selVS.Series[i].Labels() + // The last_over_time function acts like offset; thus, it + // should keep the metric name. For all the other range + // vector functions, the only change needed is to drop the + // metric name in the output. + if e.Func.Name != "last_over_time" { + metric = dropMetricName(metric) + } ss := Series{ - // For all range vector functions, the only change to the - // output labels is dropping the metric name so just do - // it once here. - Metric: dropMetricName(selVS.Series[i].Labels()), + Metric: metric, Points: getPointSlice(numSteps), } inMatrix[0].Metric = selVS.Series[i].Labels() diff --git a/promql/functions.go b/promql/functions.go index 3a96a9ecec..d96d625752 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -278,6 +278,23 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel return Vector(byValueSorter) } +// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === +func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + min := vals[1].(Vector)[0].Point.V + max := vals[2].(Vector)[0].Point.V + if max < min { + return enh.Out + } + for _, el := range vec { + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(el.Metric), + Point: Point{V: math.Max(min, math.Min(max, el.V))}, + }) + } + return enh.Out +} + // === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) @@ -383,7 +400,16 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo }) } -// === floor(Vector parser.ValueTypeVector) Vector === +// === last_over_time(Matrix parser.ValueTypeMatrix) Vector === +func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.Out, Sample{ + Metric: el.Metric, + Point: Point{V: el.Points[len(el.Points)-1].V}, + }) +} + // === max_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return aggrOverTime(vals, enh, func(values []Point) float64 { @@ -537,6 +563,18 @@ func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper return simpleFunc(vals, enh, math.Log10) } +// === sgn(Vector parser.ValueTypeVector) Vector === +func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, func(v float64) float64 { + if v < 0 { + return -1 + } else if v > 0 { + return 1 + } + return v + }) +} + // === timestamp(Vector parser.ValueTypeVector) Vector === func 
funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) @@ -893,6 +931,7 @@ var FunctionCalls = map[string]FunctionCall{ "avg_over_time": funcAvgOverTime, "ceil": funcCeil, "changes": funcChanges, + "clamp": funcClamp, "clamp_max": funcClampMax, "clamp_min": funcClampMin, "count_over_time": funcCountOverTime, @@ -914,6 +953,7 @@ var FunctionCalls = map[string]FunctionCall{ "ln": funcLn, "log10": funcLog10, "log2": funcLog2, + "last_over_time": funcLastOverTime, "max_over_time": funcMaxOverTime, "min_over_time": funcMinOverTime, "minute": funcMinute, @@ -924,6 +964,7 @@ var FunctionCalls = map[string]FunctionCall{ "resets": funcResets, "round": funcRound, "scalar": funcScalar, + "sgn": funcSgn, "sort": funcSort, "sort_desc": funcSortDesc, "sqrt": funcSqrt, diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 4516829e55..a127cd28a4 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -54,6 +54,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeMatrix}, ReturnType: ValueTypeVector, }, + "clamp": { + Name: "clamp", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, "clamp_max": { Name: "clamp_max", ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, @@ -149,6 +154,11 @@ var Functions = map[string]*Function{ Variadic: -1, ReturnType: ValueTypeVector, }, + "last_over_time": { + Name: "last_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, "ln": { Name: "ln", ArgTypes: []ValueType{ValueTypeVector}, @@ -217,6 +227,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeScalar, }, + "sgn": { + Name: "sgn", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "sort": { Name: "sort", ArgTypes: []ValueType{ValueTypeVector}, diff --git a/promql/testdata/functions.test b/promql/testdata/functions.test index b9dc27cecb..63b67d181b 100644 --- a/promql/testdata/functions.test +++ b/promql/testdata/functions.test @@ -372,7 +372,7 @@ eval instant at 60m vector(time()) {} 3600 -# Tests for clamp_max and clamp_min(). +# Tests for clamp_max, clamp_min(), and clamp(). load 5m test_clamp{src="clamp-a"} -50 test_clamp{src="clamp-b"} 0 @@ -388,6 +388,11 @@ eval instant at 0m clamp_min(test_clamp, -25) {src="clamp-b"} 0 {src="clamp-c"} 100 +eval instant at 0m clamp(test_clamp, -25, 75) + {src="clamp-a"} -25 + {src="clamp-b"} 0 + {src="clamp-c"} 75 + eval instant at 0m clamp_max(clamp_min(test_clamp, -20), 70) {src="clamp-a"} -20 {src="clamp-b"} 0 @@ -398,6 +403,36 @@ eval instant at 0m clamp_max((clamp_min(test_clamp, (-20))), (70)) {src="clamp-b"} 0 {src="clamp-c"} 70 +eval instant at 0m clamp(test_clamp, 0, NaN) + {src="clamp-a"} NaN + {src="clamp-b"} NaN + {src="clamp-c"} NaN + +eval instant at 0m clamp(test_clamp, NaN, 0) + {src="clamp-a"} NaN + {src="clamp-b"} NaN + {src="clamp-c"} NaN + +eval instant at 0m clamp(test_clamp, 5, -5) + +# Test cases for sgn. +clear +load 5m + test_sgn{src="sgn-a"} -Inf + test_sgn{src="sgn-b"} Inf + test_sgn{src="sgn-c"} NaN + test_sgn{src="sgn-d"} -50 + test_sgn{src="sgn-e"} 0 + test_sgn{src="sgn-f"} 100 + +eval instant at 0m sgn(test_sgn) + {src="sgn-a"} -1 + {src="sgn-b"} 1 + {src="sgn-c"} NaN + {src="sgn-d"} -1 + {src="sgn-e"} 0 + {src="sgn-f"} 1 + # Tests for sort/sort_desc. 
clear @@ -745,6 +780,13 @@ eval instant at 1m max_over_time(data[1m]) {type="some_nan3"} 1 {type="only_nan"} NaN +eval instant at 1m last_over_time(data[1m]) + data{type="numbers"} 3 + data{type="some_nan"} NaN + data{type="some_nan2"} 1 + data{type="some_nan3"} 1 + data{type="only_nan"} NaN + clear # Test for absent() From 432d5ebc6c18367ca4045a29d20fa6aacdc7ec2a Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Mon, 22 Feb 2021 20:19:08 +0100 Subject: [PATCH 7/9] Rename default branch to main Signed-off-by: Julien Pivotto --- .circleci/config.yml | 8 ++++---- .github/workflows/codeql-analysis.yml | 4 ++-- NOTICE | 4 ++-- README.md | 10 +++++----- RELEASE.md | 12 ++++++------ docs/disabled_features.md | 2 +- docs/storage.md | 2 +- documentation/internal_architecture.md | 4 ++-- scripts/sync_repo_files.sh | 19 +++++++++++++++---- tsdb/CHANGELOG.md | 2 -- 10 files changed, 38 insertions(+), 29 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 552fa884c6..9e40ef36eb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,7 +2,7 @@ version: 2.1 orbs: - prometheus: prometheus/prometheus@0.4.0 + prometheus: prometheus/prometheus@0.9.0 go: circleci/go@0.2.0 win: circleci/windows@2.3.0 @@ -124,14 +124,14 @@ workflows: filters: tags: only: /.*/ - - prometheus/publish_master: + - prometheus/publish_main: context: org-context requires: - test - build filters: branches: - only: master + only: main image: circleci/golang:1-node - prometheus/publish_release: context: org-context @@ -151,7 +151,7 @@ workflows: filters: branches: only: - - master + - main jobs: - repo_sync: context: org-context diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1bb0569056..3f1337d921 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -13,10 +13,10 @@ name: "CodeQL" on: push: - branches: [ master, release-* ] + branches: [ main, release-* ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] + branches: [ main ] schedule: - cron: '26 14 * * 1' diff --git a/NOTICE b/NOTICE index 5e4f509896..7c0e4c1020 100644 --- a/NOTICE +++ b/NOTICE @@ -92,7 +92,7 @@ Copyright (c) 2015,2016 Damian Gryski See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details. We also use code from a large number of npm packages. For details, see: -- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json -- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json - The individual package licenses as copied from the node_modules directory can be found in the npm_licenses.tar.bz2 archive in release tarballs and Docker images. 
diff --git a/README.md b/README.md index c84dffb7be..3090502962 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Prometheus -[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/master.svg?style=shield)][circleci] +[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/main.svg?style=shield)][circleci] [![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub] [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus) @@ -72,7 +72,7 @@ read its web assets from local filesystem directories under `web/ui/static` and from the root of the cloned repository. Note also that these directories do not include the new experimental React UI unless it has been built explicitly using `make assets` or `make build`. -An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus.yml) +An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml) You can also clone the repository yourself and build using `make build`, which will compile in the web assets so that Prometheus can be run from anywhere: @@ -106,7 +106,7 @@ You can build a docker image locally with the following commands: ## React UI Development -For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/README.md). +For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/react-app/README.md). ## More information @@ -116,11 +116,11 @@ For more information on building, running, and developing on the new React-based ## Contributing -Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/master/CONTRIBUTING.md) +Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md) ## License -Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/master/LICENSE). +Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE). [hub]: https://hub.docker.com/r/prom/prometheus/ diff --git a/RELEASE.md b/RELEASE.md index fb53c6eade..cf0f7340a6 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -39,7 +39,7 @@ If you are interested in volunteering please create a pull request against the [ The release shepherd is responsible for the entire release series of a minor release, meaning all pre- and patch releases of a minor release. The process formally starts with the initial pre-release, but some preparations should be done a few days in advance. -* We aim to keep the master branch in a working state at all times. In principle, it should be possible to cut a release from master at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of master. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release. 
+* We aim to keep the main branch in a working state at all times. In principle, it should be possible to cut a release from main at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of main. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release.
 * On the date listed in the table above, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release.
 * With the pre-release, the release shepherd is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release.
 * If regressions or critical bugs are detected, they need to get fixed before cutting a new pre-release (called `-rc.1`, `-rc.2`, etc.).
@@ -58,9 +58,9 @@ We maintain a separate branch for each minor release, named `release-<major>.<minor>`

-At the start of a new major or minor release cycle create the corresponding release branch based on the master branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.
+At the start of a new major or minor release cycle create the corresponding release branch based on the main branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.

 Changes for a patch release or release candidate should be merged into the previously mentioned release branch via pull request.

@@ -154,6 +154,6 @@ Finally, wait for the build step for the tag to finish. The point here is to wai

 For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days using the `/prombench vX.Y.Z` command, `vX.Y.Z` being the latest stable patch release's tag of the previous minor release series, such as `v2.15.2`.

-If the release has happened in the latest release branch, merge the changes into master.
+If the release has happened in the latest release branch, merge the changes into main.

 Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.
diff --git a/docs/disabled_features.md b/docs/disabled_features.md
index ec6363451b..431d7b5364 100644
--- a/docs/disabled_features.md
+++ b/docs/disabled_features.md
@@ -6,7 +6,7 @@ sort_rank: 10
 # Disabled Features

 Here is a list of features that are disabled by default since they are breaking changes or are considered experimental.
-Their behaviour can change in future releases which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/master/CHANGELOG.md).
+Their behaviour can change in future releases which will be communicated via the [release changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md). You can enable them using the `--enable-feature` flag with a comma separated list of features. They may be enabled by default in future versions. diff --git a/docs/storage.md b/docs/storage.md index 6e395724e4..bc989b158c 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -125,7 +125,7 @@ For details on configuring remote storage integrations in Prometheus, see the [r The built-in remote write receiver can be enabled by setting the `--enable-feature=remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`. -For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). +For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto). Note that on the read path, Prometheus only fetches raw series data for a set of label selectors and time ranges from the remote end. All PromQL evaluation on the raw data still happens in Prometheus itself. This means that remote read queries have some scalability limit, since all necessary data needs to be loaded into the querying Prometheus server first and then processed there. However, supporting fully distributed evaluation of PromQL was deemed infeasible for the time being. diff --git a/documentation/internal_architecture.md b/documentation/internal_architecture.md index d14f65b9b1..1bbd8931a5 100644 --- a/documentation/internal_architecture.md +++ b/documentation/internal_architecture.md @@ -44,7 +44,7 @@ Internally, the scrape discovery manager runs an instance of each configuration- When a configuration change is applied, the discovery manager stops all currently running discovery mechanisms and restarts new ones as defined in the new configuration file. -For more details, see the more extensive [documentation about service discovery internals](https://github.com/prometheus/prometheus/blob/master/discovery/README.md). +For more details, see the more extensive [documentation about service discovery internals](https://github.com/prometheus/prometheus/blob/main/discovery/README.md). ## Scrape manager @@ -93,7 +93,7 @@ Currently rules still read and write directly from/to the fanout storage, but th ### Local storage -About Prometheus's local on-disk time series database, please refer to [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/master/tsdb/db.go). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/). +About Prometheus's local on-disk time series database, please refer to [`github.com/prometheus/prometheus/tsdb.DB`](https://github.com/prometheus/prometheus/blob/main/tsdb/db.go). You can find more details about the TSDB's on-disk layout in the [local storage documentation](https://prometheus.io/docs/prometheus/latest/storage/). 
### Remote storage

diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh
index 64af78b12c..e480848566 100755
--- a/scripts/sync_repo_files.sh
+++ b/scripts/sync_repo_files.sh
@@ -30,6 +30,11 @@ source_dir="$(pwd)"
 tmp_dir="$(mktemp -d)"
 trap 'rm -rf "${tmp_dir}"' EXIT

+get_default_branch() {
+  local url="https://api.github.com/repos/${1}"
+  curl --retry 5 --silent -u "${git_user}:${GITHUB_TOKEN}" "${url}" 2>/dev/null | jq -r .default_branch
+}
+
 fetch_repos() {
   local url="https://api.github.com/users/${1}/repos?per_page=100"
   curl --retry 5 --silent -u "${git_user}:${GITHUB_TOKEN}" "${url}" 2>/dev/null |
@@ -47,9 +52,9 @@ push_branch() {
     --set-upstream "${branch}" 1>/dev/null 2>&1
 }

-post_template='{"title":"%s","base":"master","head":"%s","body":"%s"}'
-post_json="$(printf "${post_template}" "${pr_title}" "${branch}" "${pr_msg}")"
 post_pull_request() {
+  post_template='{"title":"%s","base":"%s","head":"%s","body":"%s"}'
+  post_json="$(printf "${post_template}" "${pr_title}" "${2}" "${branch}" "${pr_msg}")"
   curl --show-error --silent --fail \
     -u "${git_user}:${GITHUB_TOKEN}" \
     -d "${post_json}" \
@@ -65,11 +70,17 @@ process_repo() {
   local org_repo="$1"
   echo -e "\e[32mAnalyzing '${org_repo}'\e[0m"

+  default_branch="$(get_default_branch ${1})"
+  if [[ -z "${target_file}" ]]; then
+    echo "Can't get the default branch."
+    return
+  fi
+
   local needs_update=()
   for source_file in ${SYNC_FILES}; do
     source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)"

-    target_file="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/master/${source_file}")"
+    target_file="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/${default_branch}/${source_file}")"
     if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
       echo "LICENSE in ${org_repo} is not apache, skipping."
       continue
@@ -113,7 +124,7 @@ process_repo() {
     git add .
     git commit -s -m "${commit_msg}"
     if push_branch "${org_repo}"; then
-      post_pull_request "${org_repo}"
+      post_pull_request "${org_repo}" "${default_branch}"
     fi
   fi
 }
diff --git a/tsdb/CHANGELOG.md b/tsdb/CHANGELOG.md
index 66d07bf3cc..71a67d3b18 100644
--- a/tsdb/CHANGELOG.md
+++ b/tsdb/CHANGELOG.md
@@ -1,5 +1,3 @@
-## master / unreleased
-
 ## 0.10.0

 - [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.

From 776c2b8f422c24d8371f92a989a812e38cbff901 Mon Sep 17 00:00:00 2001
From: songjiayang
Date: Tue, 23 Feb 2021 22:28:04 +0800
Subject: [PATCH 8/9] Speed up delta calculation by computing resultValue
 without a loop

Signed-off-by: songjiayang
---
 promql/functions.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/promql/functions.go b/promql/functions.go
index d96d625752..e497be364b 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -70,17 +70,17 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	if len(samples.Points) < 2 {
 		return enh.Out
 	}
-	var (
-		counterCorrection float64
-		lastValue         float64
-	)
-	for _, sample := range samples.Points {
-		if isCounter && sample.V < lastValue {
-			counterCorrection += lastValue
+
+	resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V
+	if isCounter {
+		var lastValue float64
+		for _, sample := range samples.Points {
+			if sample.V < lastValue {
+				resultValue += lastValue
+			}
+			lastValue = sample.V
 		}
-		lastValue = sample.V
 	}
-	resultValue := lastValue - samples.Points[0].V + counterCorrection

 	// Duration between first/last samples and boundary of range.
durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 From f9e2748f6f1cff78a66e2ba1f77dd2872293271e Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Tue, 23 Feb 2021 23:23:51 +0100 Subject: [PATCH 9/9] Fix default branch detection Signed-off-by: Julien Pivotto --- scripts/sync_repo_files.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index e480848566..96db2a2323 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -71,7 +71,7 @@ process_repo() { echo -e "\e[32mAnalyzing '${org_repo}'\e[0m" default_branch="$(get_default_branch ${1})" - if [[ -z "${target_file}" ]]; then + if [[ -z "${default_branch}" ]]; then echo "Can't get the default branch." return fi
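Two closing notes on the patches above, with sketches rather than authoritative implementations. First, the `extrapolatedRate` rewrite in patch 8 is behavior-preserving: the removed code always looped, accumulating resets into `counterCorrection` and recovering the last value as a side effect, while the new code takes last-minus-first directly and runs the reset loop only when `isCounter` is set, so `delta()` pays for no loop at all. A self-contained Go comparison of the two variants, with a simplified `Point` (only the value matters here) standing in for promql's:

```go
package main

import "fmt"

// Point is a simplified stand-in for promql's Point; only the sample
// value matters for this comparison.
type Point struct{ V float64 }

// oldResultValue mirrors the removed code: one unconditional pass that
// accumulates counter resets into counterCorrection.
func oldResultValue(points []Point, isCounter bool) float64 {
	var counterCorrection, lastValue float64
	for _, sample := range points {
		if isCounter && sample.V < lastValue {
			counterCorrection += lastValue
		}
		lastValue = sample.V
	}
	return lastValue - points[0].V + counterCorrection
}

// newResultValue mirrors the patched code: start from last-minus-first
// and loop only when the input is a counter and resets must be added back.
func newResultValue(points []Point, isCounter bool) float64 {
	resultValue := points[len(points)-1].V - points[0].V
	if isCounter {
		var lastValue float64
		for _, sample := range points {
			if sample.V < lastValue {
				resultValue += lastValue
			}
			lastValue = sample.V
		}
	}
	return resultValue
}

func main() {
	// A counter that resets after 30: resultValue = 15 - 10 + 30 (reset
	// correction) = 35. Without counter semantics it is just 15 - 10 = 5.
	pts := []Point{{10}, {20}, {30}, {5}, {15}}
	fmt.Println(oldResultValue(pts, true), newResultValue(pts, true))   // 35 35
	fmt.Println(oldResultValue(pts, false), newResultValue(pts, false)) // 5 5
}
```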
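Second, the default-branch plumbing that patch 7 introduces and patch 9 repairs reduces to one GitHub API call: `GET /repos/{owner}/{repo}` returns a `default_branch` field, which the script extracts with `jq -r` and must then test for emptiness against the right variable (`${default_branch}`, not the unrelated `${target_file}` that the original check inspected). A rough Go equivalent of that lookup; unlike the script, it is unauthenticated for brevity (the script sends `${git_user}:${GITHUB_TOKEN}`), so it is subject to much lower API rate limits:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// defaultBranch queries the GitHub REST API for a repository's default
// branch, the same field get_default_branch extracts with jq.
// orgRepo is "owner/name", e.g. "prometheus/prometheus".
func defaultBranch(orgRepo string) (string, error) {
	resp, err := http.Get("https://api.github.com/repos/" + orgRepo)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}
	var repo struct {
		DefaultBranch string `json:"default_branch"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&repo); err != nil {
		return "", err
	}
	// An empty value is the condition patch 9 makes the script check
	// correctly, against ${default_branch} rather than ${target_file}.
	if repo.DefaultBranch == "" {
		return "", fmt.Errorf("can't get the default branch")
	}
	return repo.DefaultBranch, nil
}

func main() {
	branch, err := defaultBranch("prometheus/prometheus")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(branch) // "main" after the rename
}
```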