Merge remote-tracking branch 'upstream/master' into merge-release-2.19

Signed-off-by: Ganesh Vernekar <cs15btech11018@iith.ac.in>

Author: Ganesh Vernekar
Date:   2020-06-26 14:33:50 +05:30
Commit: a4c2ea1ca3 (GPG key ID: 0241A11211763456; no known key found for this signature in database)

137 changed files with 12760 additions and 1512 deletions

.circleci/config.yml

@@ -87,11 +87,11 @@ jobs:
       - setup_remote_docker
       - run: ./fuzzit.sh fuzzing

-  makefile_sync:
+  repo_sync:
     executor: golang
     steps:
       - checkout
-      - run: ./scripts/sync_makefiles.sh
+      - run: ./scripts/sync_repo_files.sh

 workflows:
   version: 2
@@ -143,7 +143,7 @@ workflows:
               only:
                 - master
     jobs:
-      - makefile_sync:
+      - repo_sync:
           context: org-context
       - fuzzit_fuzzing:
           context: org-context

@@ -9,7 +9,6 @@ jobs:
       AUTH_FILE: ${{ secrets.TEST_INFRA_GKE_AUTH }}
       BRANCH: ${{ github.event.client_payload.BRANCH }}
       BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
-      CLUSTER_NAME: test-infra
       GITHUB_TOKEN: ${{ secrets.PROMBOT_TOKEN }}
       GITHUB_ORG: prometheus
       GITHUB_REPO: prometheus

CHANGELOG.md

@@ -1,3 +1,7 @@
+## master / unreleased
+
+* [ENHANCEMENT] TSDB: WAL compression is enabled by default.
+
 ## 2.19.2 / 2020-06-26

 * [BUGFIX] Remote Write: Fix panic when reloading config with modified queue parameters. #7452
@@ -18,6 +22,9 @@
 * [BUGFIX] React UI: Don't null out data when clicking on the current tab. #7243
 * [BUGFIX] PromQL: Correctly track number of samples for a query. #7307
 * [BUGFIX] PromQL: Return NaN when histogram buckets have 0 observations. #7318
+
+## 2.18.2 / 2020-06-09
+
 * [BUGFIX] TSDB: Fix incorrect query results when using Prometheus with remote reads configured #7361

 ## 2.18.1 / 2020-05-07

MAINTAINERS.md

@@ -7,7 +7,7 @@
 * `documentation`
   * `prometheus-mixin`: @beorn7
 * `storage`
-  * `remote`: @csmarchbanks, @cstyan
+  * `remote`: @csmarchbanks, @cstyan, @bwplotka
   * `tsdb`: @codesome, @krasi-georgiev
 * `web`
   * `ui`: @juliusv

NOTICE

@@ -86,6 +86,11 @@ https://github.com/samuel/go-zookeeper
 Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com>
 See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details.

+Time series compression algorithm from Facebook's Gorilla paper
+https://github.com/dgryski/go-tsz
+Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
+See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
+
 We also use code from a large number of npm packages. For details, see:
 - https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
 - https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json

cmd/prometheus/main.go

@@ -214,7 +214,7 @@ func main() {
 		Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)

 	a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
-		Default("false").BoolVar(&cfg.tsdb.WALCompression)
+		Default("true").BoolVar(&cfg.tsdb.WALCompression)

 	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
 		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
@@ -278,6 +278,12 @@ func main() {
 		os.Exit(2)
 	}

+	// Throw error for invalid config before starting other components.
+	if _, err := config.LoadFile(cfg.configFile); err != nil {
+		level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
+		os.Exit(2)
+	}
+
 	cfg.web.ReadTimeout = time.Duration(cfg.webTimeout)
 	// Default -web.route-prefix to path of -web.external-url.
 	if cfg.web.RoutePrefix == "" {
@@ -955,6 +961,14 @@ func (s *readyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Q
 	return nil, tsdb.ErrNotReady
 }

+// ChunkQuerier implements the Storage interface.
+func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+	if x := s.get(); x != nil {
+		return x.ChunkQuerier(ctx, mint, maxt)
+	}
+	return nil, tsdb.ErrNotReady
+}
+
 // Appender implements the Storage interface.
 func (s *readyStorage) Appender() storage.Appender {
 	if x := s.get(); x != nil {
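
The new pre-flight `config.LoadFile` call fails startup before any other component is brought up, using the same exit code 2 as flag-parsing errors. A minimal sketch of that fail-fast pattern — `loadConfig` here is a hypothetical stand-in, not the real Prometheus API:

```go
package main

import (
	"fmt"
	"os"
)

// loadConfig is a stand-in for config.LoadFile: parse and validate
// the configuration file before anything else starts.
func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("error loading config (--config.file=%s): %w", path, err)
	}
	return nil
}

func main() {
	if err := loadConfig("prometheus.yml"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2) // same exit code the startup path above uses for a bad config
	}
	// ...only now start web, scrape, and storage components...
}
```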

cmd/prometheus/main_test.go

@@ -111,7 +111,7 @@ func TestFailedStartupExitCode(t *testing.T) {
 	}

 	fakeInputFile := "fake-input-file"
-	expectedExitStatus := 1
+	expectedExitStatus := 2

 	prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
 	err := prom.Run()
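
The expected exit status moves from 1 to 2 because the missing config file is now caught by the pre-flight check added above. A hedged sketch of how an exit-status assertion of this kind works with os/exec (assuming a Unix `false` binary; the real test's harness is not shown in this hunk):

```go
package main

import (
	"errors"
	"os/exec"
	"testing"
)

func TestExitStatus(t *testing.T) {
	// Run a command that is known to exit non-zero.
	cmd := exec.Command("false")
	err := cmd.Run()

	// A non-zero exit surfaces as *exec.ExitError.
	var exitErr *exec.ExitError
	if !errors.As(err, &exitErr) {
		t.Fatalf("expected an *exec.ExitError, got %v", err)
	}
	if got := exitErr.ExitCode(); got != 1 {
		t.Errorf("exit code = %d, want 1", got)
	}
}
```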

cmd/promtool/testdata/alerts.yml (new file)

@@ -0,0 +1,13 @@
# This is the rules file.

groups:
  - name: example
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 5m
        labels:
          severity: page
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."

cmd/promtool/testdata/unittest.yml (new file)

@@ -0,0 +1,21 @@
rule_files:
  - alerts.yml

evaluation_interval: 1m

tests:
  - interval: 1m
    input_series:
      - series: 'up{job="prometheus", instance="localhost:9090"}'
        values: "0+0x1440"
    alert_rule_test:
      - eval_time: 1d
        alertname: InstanceDown
        exp_alerts:
          - exp_labels:
              severity: page
              instance: localhost:9090
              job: prometheus
            exp_annotations:
              summary: "Instance localhost:9090 down"
              description: "localhost:9090 of job prometheus has been down for more than 5 minutes."
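
The `values` notation `a+bxn` means: start at `a`, then emit `n` more samples, each incremented by `b`, so `"0+0x1440"` holds `up` at 0 for a full day of 1m intervals and lets the `for: 5m` alert be firing by `eval_time: 1d`. A hand-rolled expansion of the notation (illustrative only; the real parser lives in Prometheus's promql test utilities):

```go
package main

import "fmt"

// expand interprets the "start+incrementxcount" series notation:
// it returns start, then count more samples, each one increment apart.
func expand(start, increment float64, count int) []float64 {
	out := []float64{start}
	for i := 0; i < count; i++ {
		out = append(out, out[len(out)-1]+increment)
	}
	return out
}

func main() {
	// "0+0x1440": the value 0 repeated across 1441 samples, i.e. a full
	// day of `up == 0` at a 1m interval.
	samples := expand(0, 0, 1440)
	fmt.Println(len(samples), samples[0], samples[len(samples)-1])
}
```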

cmd/promtool/unittest.go

@@ -29,6 +29,7 @@ import (
 	"github.com/pkg/errors"
 	yaml "gopkg.in/yaml.v2"

+	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
@@ -76,15 +77,16 @@ func ruleUnitTest(filename string) []error {
 	}

 	if unitTestInp.EvaluationInterval == 0 {
-		unitTestInp.EvaluationInterval = 1 * time.Minute
+		unitTestInp.EvaluationInterval = model.Duration(1 * time.Minute)
 	}

 	// Bounds for evaluating the rules.
 	mint := time.Unix(0, 0).UTC()
 	maxd := unitTestInp.maxEvalTime()
 	maxt := mint.Add(maxd)
+	evalInterval := time.Duration(unitTestInp.EvaluationInterval)
 	// Rounding off to nearest Eval time (> maxt).
-	maxt = maxt.Add(unitTestInp.EvaluationInterval / 2).Round(unitTestInp.EvaluationInterval)
+	maxt = maxt.Add(evalInterval / 2).Round(evalInterval)

 	// Giving number for groups mentioned in the file for ordering.
 	// Lower number group should be evaluated before higher number group.
@@ -99,7 +101,7 @@ func ruleUnitTest(filename string) []error {
 	// Testing.
 	var errs []error
 	for _, t := range unitTestInp.Tests {
-		ers := t.test(mint, maxt, unitTestInp.EvaluationInterval, groupOrderMap,
+		ers := t.test(mint, maxt, evalInterval, groupOrderMap,
 			unitTestInp.RuleFiles...)
 		if ers != nil {
 			errs = append(errs, ers...)
@@ -115,7 +117,7 @@ func ruleUnitTest(filename string) []error {
 // unitTestFile holds the contents of a single unit test file.
 type unitTestFile struct {
 	RuleFiles          []string       `yaml:"rule_files"`
-	EvaluationInterval time.Duration  `yaml:"evaluation_interval,omitempty"`
+	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
 	GroupEvalOrder     []string       `yaml:"group_eval_order"`
 	Tests              []testGroup    `yaml:"tests"`
 }
@@ -157,7 +159,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {

 // testGroup is a group of input series and tests associated with it.
 type testGroup struct {
-	Interval        time.Duration    `yaml:"interval"`
+	Interval        model.Duration   `yaml:"interval"`
 	InputSeries     []series         `yaml:"input_series"`
 	AlertRuleTests  []alertTestCase  `yaml:"alert_rule_test,omitempty"`
 	PromqlExprTests []promqlTestCase `yaml:"promql_expr_test,omitempty"`
@@ -182,7 +184,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 		Logger: log.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
-	groupsMap, ers := m.LoadGroups(tg.Interval, tg.ExternalLabels, ruleFiles...)
+	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, ruleFiles...)
 	if ers != nil {
 		return ers
 	}
@@ -193,11 +195,11 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 	// This avoids storing them in memory, as the number of evals might be high.

 	// All the `eval_time` for which we have unit tests for alerts.
-	alertEvalTimesMap := map[time.Duration]struct{}{}
+	alertEvalTimesMap := map[model.Duration]struct{}{}
 	// Map of all the eval_time+alertname combination present in the unit tests.
-	alertsInTest := make(map[time.Duration]map[string]struct{})
+	alertsInTest := make(map[model.Duration]map[string]struct{})
 	// Map of all the unit tests for given eval_time.
-	alertTests := make(map[time.Duration][]alertTestCase)
+	alertTests := make(map[model.Duration][]alertTestCase)

 	for _, alert := range tg.AlertRuleTests {
 		alertEvalTimesMap[alert.EvalTime] = struct{}{}
@@ -208,7 +210,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 		alertTests[alert.EvalTime] = append(alertTests[alert.EvalTime], alert)
 	}

-	alertEvalTimes := make([]time.Duration, 0, len(alertEvalTimesMap))
+	alertEvalTimes := make([]model.Duration, 0, len(alertEvalTimesMap))
 	for k := range alertEvalTimesMap {
 		alertEvalTimes = append(alertEvalTimes, k)
 	}
@@ -242,8 +244,8 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 	}

 	for {
-		if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= alertEvalTimes[curr] &&
-			alertEvalTimes[curr] < ts.Add(evalInterval).Sub(mint)) {
+		if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
+			time.Duration(alertEvalTimes[curr]) < ts.Add(time.Duration(evalInterval)).Sub(mint)) {
 			break
 		}
@@ -322,7 +324,7 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou
 	// Checking promql expressions.
 Outer:
 	for _, testCase := range tg.PromqlExprTests {
-		got, err := query(suite.Context(), testCase.Expr, mint.Add(testCase.EvalTime),
+		got, err := query(suite.Context(), testCase.Expr, mint.Add(time.Duration(testCase.EvalTime)),
 			suite.QueryEngine(), suite.Queryable())
 		if err != nil {
 			errs = append(errs, errors.Errorf(" expr: %q, time: %s, err: %s", testCase.Expr,
@@ -373,15 +375,15 @@ Outer:

 // seriesLoadingString returns the input series in PromQL notation.
 func (tg *testGroup) seriesLoadingString() string {
-	result := ""
-	result += "load " + shortDuration(tg.Interval) + "\n"
+	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
 	for _, is := range tg.InputSeries {
-		result += " " + is.Series + " " + is.Values + "\n"
+		result += fmt.Sprintf(" %v %v\n", is.Series, is.Values)
 	}
 	return result
 }

-func shortDuration(d time.Duration) string {
+func shortDuration(d model.Duration) string {
 	s := d.String()
 	if strings.HasSuffix(s, "m0s") {
 		s = s[:len(s)-2]
@@ -407,7 +409,7 @@ func orderedGroups(groupsMap map[string]*rules.Group, groupOrderMap map[string]i
 // maxEvalTime returns the max eval time among all alert and promql unit tests.
 func (tg *testGroup) maxEvalTime() time.Duration {
-	var maxd time.Duration
+	var maxd model.Duration
 	for _, alert := range tg.AlertRuleTests {
 		if alert.EvalTime > maxd {
 			maxd = alert.EvalTime
@@ -418,7 +420,7 @@ func (tg *testGroup) maxEvalTime() time.Duration {
 			maxd = pet.EvalTime
 		}
 	}
-	return maxd
+	return time.Duration(maxd)
 }

 func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
@@ -483,7 +485,7 @@ type series struct {
 }

 type alertTestCase struct {
-	EvalTime  time.Duration  `yaml:"eval_time"`
+	EvalTime  model.Duration `yaml:"eval_time"`
 	Alertname string         `yaml:"alertname"`
 	ExpAlerts []alert        `yaml:"exp_alerts"`
 }
@@ -495,7 +497,7 @@ type alert struct {

 type promqlTestCase struct {
 	Expr       string         `yaml:"expr"`
-	EvalTime   time.Duration  `yaml:"eval_time"`
+	EvalTime   model.Duration `yaml:"eval_time"`
 	ExpSamples []sample       `yaml:"exp_samples"`
 }
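
The move from `time.Duration` to `model.Duration` throughout this file is what allows test files to use Prometheus duration syntax such as `1d` (see `eval_time: 1d` in testdata/unittest.yml above); the standard library parser stops at hours. A small demonstration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// Standard library: "1d" is not a valid unit, so YAML fields typed as
	// time.Duration cannot express a day.
	if _, err := time.ParseDuration("1d"); err != nil {
		fmt.Println("time.ParseDuration:", err)
	}

	// Prometheus duration syntax accepts d, w, y; convert explicitly when
	// arithmetic on time.Time is needed, as the diff above does.
	d, err := model.ParseDuration("1d")
	if err != nil {
		panic(err)
	}
	fmt.Println("model.ParseDuration:", time.Duration(d)) // 24h0m0s
}
```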

cmd/promtool/unittest_test.go (new file)

@@ -0,0 +1,42 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "testing"
func TestRulesUnitTest(t *testing.T) {
type args struct {
files []string
}
tests := []struct {
name string
args args
want int
}{
{
name: "Passing Unit Tests",
args: args{
files: []string{"./testdata/unittest.yml"},
},
want: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
}
}

discovery/config/config.go

@@ -18,6 +18,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/azure"
 	"github.com/prometheus/prometheus/discovery/consul"
+	"github.com/prometheus/prometheus/discovery/digitalocean"
 	"github.com/prometheus/prometheus/discovery/dns"
 	"github.com/prometheus/prometheus/discovery/ec2"
 	"github.com/prometheus/prometheus/discovery/file"
@@ -40,6 +41,8 @@ type ServiceDiscoveryConfig struct {
 	FileSDConfigs []*file.SDConfig `yaml:"file_sd_configs,omitempty"`
 	// List of Consul service discovery configurations.
 	ConsulSDConfigs []*consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
+	// List of DigitalOcean service discovery configurations.
+	DigitalOceanSDConfigs []*digitalocean.SDConfig `yaml:"digitalocean_sd_configs,omitempty"`
 	// List of Serverset service discovery configurations.
 	ServersetSDConfigs []*zookeeper.ServersetSDConfig `yaml:"serverset_sd_configs,omitempty"`
 	// NerveSDConfigs is a list of Nerve service discovery configurations.
@@ -72,6 +75,11 @@ func (c *ServiceDiscoveryConfig) Validate() error {
 			return errors.New("empty or null section in consul_sd_configs")
 		}
 	}
+	for _, cfg := range c.DigitalOceanSDConfigs {
+		if cfg == nil {
+			return errors.New("empty or null section in digitalocean_sd_configs")
+		}
+	}
 	for _, cfg := range c.DNSSDConfigs {
 		if cfg == nil {
 			return errors.New("empty or null section in dns_sd_configs")

discovery/consul/consul.go

@@ -36,7 +36,7 @@ import (
 )

 const (
-	watchTimeout  = 30 * time.Second
+	watchTimeout  = 10 * time.Minute
 	retryInterval = 15 * time.Second

 	// addressLabel is the name for the label containing a target's address.
@@ -95,7 +95,7 @@
 		Scheme:          "http",
 		Server:          "localhost:8500",
 		AllowStale:      true,
-		RefreshInterval: model.Duration(watchTimeout),
+		RefreshInterval: model.Duration(30 * time.Second),
 	}
 )
@@ -175,7 +175,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 		return nil, err
 	}
 	transport := &http.Transport{
-		IdleConnTimeout: 5 * time.Duration(conf.RefreshInterval),
+		IdleConnTimeout: 2 * time.Duration(watchTimeout),
 		TLSClientConfig: tls,
 		DialContext: conntrack.NewDialContextFunc(
 			conntrack.DialWithTracing(),
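
Blocking Consul queries now long-poll for up to 10 minutes, so the refresh interval gets an independent 30s default and the idle connection timeout is tied to the watch timeout rather than the refresh interval. A sketch of the invariant being preserved (illustrative values, standard library only):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

const watchTimeout = 10 * time.Minute

func main() {
	// An idle timeout shorter than the blocking-query wait would close the
	// connection while Consul is still holding the request open; keeping it
	// at 2x the watch timeout leaves comfortable headroom.
	transport := &http.Transport{
		IdleConnTimeout: 2 * watchTimeout,
	}
	fmt.Println(transport.IdleConnTimeout > watchTimeout) // true
}
```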

discovery/consul/consul_test.go

@@ -223,20 +223,20 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 		switch r.URL.String() {
 		case "/v1/agent/self":
 			response = AgentAnswer
-		case "/v1/health/service/test?node-meta=rack_name%3A2304&stale=&tag=tag1&wait=30000ms":
+		case "/v1/health/service/test?node-meta=rack_name%3A2304&stale=&tag=tag1&wait=600000ms":
 			response = ServiceTestAnswer
-		case "/v1/health/service/test?wait=30000ms":
+		case "/v1/health/service/test?wait=600000ms":
 			response = ServiceTestAnswer
-		case "/v1/health/service/other?wait=30000ms":
+		case "/v1/health/service/other?wait=600000ms":
 			response = `[]`
-		case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=30000ms":
+		case "/v1/catalog/services?node-meta=rack_name%3A2304&stale=&wait=600000ms":
 			response = ServicesTestAnswer
-		case "/v1/catalog/services?wait=30000ms":
+		case "/v1/catalog/services?wait=600000ms":
 			response = ServicesTestAnswer
-		case "/v1/catalog/services?index=1&node-meta=rack_name%3A2304&stale=&wait=30000ms":
+		case "/v1/catalog/services?index=1&node-meta=rack_name%3A2304&stale=&wait=600000ms":
 			time.Sleep(5 * time.Second)
 			response = ServicesTestAnswer
-		case "/v1/catalog/services?index=1&wait=30000ms":
+		case "/v1/catalog/services?index=1&wait=600000ms":
 			time.Sleep(5 * time.Second)
 			response = ServicesTestAnswer
 		default:
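
The rewritten fixtures simply reflect the longer watch timeout: the Consul API client expresses the blocking-query deadline as a `wait` query parameter in milliseconds, which is where `wait=600000ms` comes from. The arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	watchTimeout := 10 * time.Minute
	// 10 minutes rendered in milliseconds, as seen in the URLs above.
	fmt.Printf("wait=%dms\n", watchTimeout.Milliseconds()) // wait=600000ms
}
```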

discovery/digitalocean/digitalocean.go (new file)

@@ -0,0 +1,193 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digitalocean
import (
"context"
"fmt"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/digitalocean/godo"
"github.com/go-kit/kit/log"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
doLabel = model.MetaLabelPrefix + "digitalocean_"
doLabelID = doLabel + "droplet_id"
doLabelName = doLabel + "droplet_name"
doLabelImage = doLabel + "image"
doLabelPrivateIPv4 = doLabel + "private_ipv4"
doLabelPublicIPv4 = doLabel + "public_ipv4"
doLabelPublicIPv6 = doLabel + "public_ipv6"
doLabelRegion = doLabel + "region"
doLabelSize = doLabel + "size"
doLabelStatus = doLabel + "status"
doLabelFeatures = doLabel + "features"
doLabelTags = doLabel + "tags"
separator = ","
)
// DefaultSDConfig is the default DigitalOcean SD configuration.
var DefaultSDConfig = SDConfig{
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
}
// SDConfig is the configuration for DigitalOcean based service discovery.
type SDConfig struct {
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
return nil
}
// Discovery periodically performs DigitalOcean requests. It implements
// the Discoverer interface.
type Discovery struct {
*refresh.Discovery
client *godo.Client
port int
}
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
d := &Discovery{
port: conf.Port,
}
rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", false)
if err != nil {
return nil, err
}
d.client, err = godo.New(
&http.Client{
Transport: rt,
Timeout: time.Duration(conf.RefreshInterval),
},
godo.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)),
)
if err != nil {
return nil, fmt.Errorf("error setting up digital ocean agent: %w", err)
}
d.Discovery = refresh.NewDiscovery(
logger,
"digitalocean",
time.Duration(conf.RefreshInterval),
d.refresh,
)
return d, nil
}
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
tg := &targetgroup.Group{
Source: "DigitalOcean",
}
droplets, err := d.listDroplets()
if err != nil {
return nil, err
}
for _, droplet := range droplets {
if droplet.Networks == nil || len(droplet.Networks.V4) == 0 {
continue
}
privateIPv4, err := droplet.PrivateIPv4()
if err != nil {
return nil, fmt.Errorf("error while reading private IPv4 of droplet %d: %w", droplet.ID, err)
}
publicIPv4, err := droplet.PublicIPv4()
if err != nil {
return nil, fmt.Errorf("error while reading public IPv4 of droplet %d: %w", droplet.ID, err)
}
publicIPv6, err := droplet.PublicIPv6()
if err != nil {
return nil, fmt.Errorf("error while reading public IPv6 of droplet %d: %w", droplet.ID, err)
}
labels := model.LabelSet{
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
doLabelPrivateIPv4: model.LabelValue(privateIPv4),
doLabelPublicIPv4: model.LabelValue(publicIPv4),
doLabelPublicIPv6: model.LabelValue(publicIPv6),
doLabelRegion: model.LabelValue(droplet.Region.Slug),
doLabelSize: model.LabelValue(droplet.SizeSlug),
doLabelStatus: model.LabelValue(droplet.Status),
}
addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
if len(droplet.Features) > 0 {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider feature positions.
features := separator + strings.Join(droplet.Features, separator) + separator
labels[doLabelFeatures] = model.LabelValue(features)
}
if len(droplet.Tags) > 0 {
// We surround the separated list with the separator as well. This way regular expressions
// in relabeling rules don't have to consider tag positions.
tags := separator + strings.Join(droplet.Tags, separator) + separator
labels[doLabelTags] = model.LabelValue(tags)
}
tg.Targets = append(tg.Targets, labels)
}
return []*targetgroup.Group{tg}, nil
}
func (d *Discovery) listDroplets() ([]godo.Droplet, error) {
var (
droplets []godo.Droplet
opts = &godo.ListOptions{Page: 1}
)
for {
paginatedDroplets, resp, err := d.client.Droplets.List(context.Background(), opts)
if err != nil {
return nil, fmt.Errorf("error while listing droplets page %d: %w", opts.Page, err)
}
droplets = append(droplets, paginatedDroplets...)
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
opts.Page++
}
return droplets, nil
}
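
Joining features and tags with surrounding separators, as the code above does, lets one relabeling regex match a value regardless of its position in the list. A small illustration:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	features := []string{"backups", "ipv6", "virtio"}
	// Same wrapping as the discovery code above: ",backups,ipv6,virtio,"
	label := "," + strings.Join(features, ",") + ","

	// One anchored pattern matches the feature first, last, or in the middle.
	re := regexp.MustCompile(`.*,ipv6,.*`)
	fmt.Println(re.MatchString(label)) // true
}
```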

discovery/digitalocean/digitalocean_test.go (new file)

@@ -0,0 +1,125 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digitalocean
import (
"context"
"fmt"
"net/url"
"testing"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/testutil"
)
type DigitalOceanSDTestSuite struct {
Mock *SDMock
}
func (s *DigitalOceanSDTestSuite) TearDownSuite() {
s.Mock.ShutdownServer()
}
func (s *DigitalOceanSDTestSuite) SetupTest(t *testing.T) {
s.Mock = NewSDMock(t)
s.Mock.Setup()
s.Mock.HandleDropletsList()
}
func TestDigitalOceanSDRefresh(t *testing.T) {
sdmock := &DigitalOceanSDTestSuite{}
sdmock.SetupTest(t)
t.Cleanup(sdmock.TearDownSuite)
cfg := DefaultSDConfig
cfg.HTTPClientConfig.BearerToken = tokenID
d, err := NewDiscovery(&cfg, log.NewNopLogger())
testutil.Ok(t, err)
endpoint, err := url.Parse(sdmock.Mock.Endpoint())
testutil.Ok(t, err)
d.client.BaseURL = endpoint
ctx := context.Background()
tgs, err := d.refresh(ctx)
testutil.Ok(t, err)
testutil.Equals(t, 1, len(tgs))
tg := tgs[0]
testutil.Assert(t, tg != nil, "tg should not be nil")
testutil.Assert(t, tg.Targets != nil, "tg.targets should not be nil")
testutil.Equals(t, 4, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
"__address__": model.LabelValue("104.236.32.182:80"),
"__meta_digitalocean_droplet_id": model.LabelValue("3164444"),
"__meta_digitalocean_droplet_name": model.LabelValue("example.com"),
"__meta_digitalocean_image": model.LabelValue("ubuntu-16-04-x64"),
"__meta_digitalocean_private_ipv4": model.LabelValue(""),
"__meta_digitalocean_public_ipv4": model.LabelValue("104.236.32.182"),
"__meta_digitalocean_public_ipv6": model.LabelValue("2604:A880:0800:0010:0000:0000:02DD:4001"),
"__meta_digitalocean_region": model.LabelValue("nyc3"),
"__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"),
"__meta_digitalocean_status": model.LabelValue("active"),
"__meta_digitalocean_features": model.LabelValue(",backups,ipv6,virtio,"),
},
{
"__address__": model.LabelValue("104.131.186.241:80"),
"__meta_digitalocean_droplet_id": model.LabelValue("3164494"),
"__meta_digitalocean_droplet_name": model.LabelValue("prometheus"),
"__meta_digitalocean_image": model.LabelValue("ubuntu-16-04-x64"),
"__meta_digitalocean_private_ipv4": model.LabelValue(""),
"__meta_digitalocean_public_ipv4": model.LabelValue("104.131.186.241"),
"__meta_digitalocean_public_ipv6": model.LabelValue(""),
"__meta_digitalocean_region": model.LabelValue("nyc3"),
"__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"),
"__meta_digitalocean_status": model.LabelValue("active"),
"__meta_digitalocean_tags": model.LabelValue(",monitor,"),
"__meta_digitalocean_features": model.LabelValue(",virtio,"),
},
{
"__address__": model.LabelValue("167.172.111.118:80"),
"__meta_digitalocean_droplet_id": model.LabelValue("175072239"),
"__meta_digitalocean_droplet_name": model.LabelValue("prometheus-demo-old"),
"__meta_digitalocean_image": model.LabelValue("ubuntu-18-04-x64"),
"__meta_digitalocean_private_ipv4": model.LabelValue("10.135.64.211"),
"__meta_digitalocean_public_ipv4": model.LabelValue("167.172.111.118"),
"__meta_digitalocean_public_ipv6": model.LabelValue(""),
"__meta_digitalocean_region": model.LabelValue("fra1"),
"__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"),
"__meta_digitalocean_status": model.LabelValue("off"),
"__meta_digitalocean_features": model.LabelValue(",ipv6,private_networking,"),
},
{
"__address__": model.LabelValue("138.65.56.69:80"),
"__meta_digitalocean_droplet_id": model.LabelValue("176011507"),
"__meta_digitalocean_droplet_name": model.LabelValue("prometheus-demo"),
"__meta_digitalocean_image": model.LabelValue("ubuntu-18-04-x64"),
"__meta_digitalocean_private_ipv4": model.LabelValue("10.135.64.212"),
"__meta_digitalocean_public_ipv4": model.LabelValue("138.65.56.69"),
"__meta_digitalocean_public_ipv6": model.LabelValue("2a03:b0c0:3:f0::cf2:4"),
"__meta_digitalocean_region": model.LabelValue("fra1"),
"__meta_digitalocean_size": model.LabelValue("s-1vcpu-1gb"),
"__meta_digitalocean_status": model.LabelValue("active"),
"__meta_digitalocean_features": model.LabelValue(",ipv6,private_networking,"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
testutil.Equals(t, lbls, tg.Targets[i])
})
}
}

discovery/digitalocean/mock_test.go (new file)

@@ -0,0 +1,644 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digitalocean
import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"testing"
)
// SDMock is the interface for the DigitalOcean mock
type SDMock struct {
t *testing.T
Server *httptest.Server
Mux *http.ServeMux
}
// NewSDMock returns a new SDMock.
func NewSDMock(t *testing.T) *SDMock {
return &SDMock{
t: t,
}
}
// Endpoint returns the URI to the mock server
func (m *SDMock) Endpoint() string {
return m.Server.URL + "/"
}
// Setup creates the mock server
func (m *SDMock) Setup() {
m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux)
}
// ShutdownServer shuts down the mock server.
func (m *SDMock) ShutdownServer() {
m.Server.Close()
}
const tokenID = "3c9a75a2-24fd-4508-b4f2-11f18aa97411"
// HandleDropletsList mocks droplet list.
func (m *SDMock) HandleDropletsList() {
m.Mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) {
w.WriteHeader(http.StatusForbidden)
return
}
w.Header().Add("content-type", "application/json; charset=utf-8")
w.Header().Add("ratelimit-limit", "1200")
w.Header().Add("ratelimit-remaining", "965")
w.Header().Add("ratelimit-reset", "1415984218")
w.WriteHeader(http.StatusAccepted)
page := 1
if pageQuery, ok := r.URL.Query()["page"]; ok {
var err error
page, err = strconv.Atoi(pageQuery[0])
if err != nil {
panic(err)
}
}
fmt.Fprint(w, []string{`
{
"droplets": [
{
"id": 3164444,
"name": "example.com",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"locked": false,
"status": "active",
"kernel": {
"id": 2233,
"name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic",
"version": "3.13.0-37-generic"
},
"created_at": "2014-11-14T16:29:21Z",
"features": [
"backups",
"ipv6",
"virtio"
],
"backup_ids": [
7938002
],
"snapshot_ids": [
],
"image": {
"id": 6918990,
"name": "14.04 x64",
"distribution": "Ubuntu",
"slug": "ubuntu-16-04-x64",
"public": true,
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"nyc3"
],
"created_at": "2014-10-17T20:24:33Z",
"type": "snapshot",
"min_disk_size": 20,
"size_gigabytes": 2.34
},
"volume_ids": [
],
"size": {
},
"size_slug": "s-1vcpu-1gb",
"networks": {
"v4": [
{
"ip_address": "104.236.32.182",
"netmask": "255.255.192.0",
"gateway": "104.236.0.1",
"type": "public"
}
],
"v6": [
{
"ip_address": "2604:A880:0800:0010:0000:0000:02DD:4001",
"netmask": 64,
"gateway": "2604:A880:0800:0010:0000:0000:0000:0001",
"type": "public"
}
]
},
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
],
"features": [
"virtio",
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": null
},
"tags": [
],
"vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
},
{
"id": 3164494,
"name": "prometheus",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"locked": false,
"status": "active",
"kernel": {
"id": 2233,
"name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic",
"version": "3.13.0-37-generic"
},
"created_at": "2014-11-14T16:36:31Z",
"features": [
"virtio"
],
"backup_ids": [
],
"snapshot_ids": [
7938206
],
"image": {
"id": 6918990,
"name": "14.04 x64",
"distribution": "Ubuntu",
"slug": "ubuntu-16-04-x64",
"public": true,
"regions": [
"nyc1",
"ams1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"nyc3"
],
"created_at": "2014-10-17T20:24:33Z",
"type": "snapshot",
"min_disk_size": 20,
"size_gigabytes": 2.34
},
"volume_ids": [
],
"size": {
},
"size_slug": "s-1vcpu-1gb",
"networks": {
"v4": [
{
"ip_address": "104.131.186.241",
"netmask": "255.255.240.0",
"gateway": "104.131.176.1",
"type": "public"
}
]
},
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
"s-1vcpu-1gb",
"s-1vcpu-2gb",
"s-1vcpu-3gb",
"s-2vcpu-2gb",
"s-3vcpu-1gb",
"s-2vcpu-4gb",
"s-4vcpu-8gb",
"s-6vcpu-16gb",
"s-8vcpu-32gb",
"s-12vcpu-48gb",
"s-16vcpu-64gb",
"s-20vcpu-96gb",
"s-24vcpu-128gb",
"s-32vcpu-192gb"
],
"features": [
"virtio",
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": true
},
"tags": [
"monitor"
],
"vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
}
],
"links": {
"pages": {
"next": "https://api.digitalocean.com/v2/droplets?page=2&per_page=2",
"last": "https://api.digitalocean.com/v2/droplets?page=2&per_page=2"
}
},
"meta": {
"total": 4
}
}
`,
`
{
"droplets": [
{
"id": 175072239,
"name": "prometheus-demo-old",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"locked": false,
"status": "off",
"kernel": null,
"created_at": "2020-01-10T16:47:39Z",
"features": [
"ipv6",
"private_networking"
],
"backup_ids": [],
"next_backup_window": null,
"snapshot_ids": [],
"image": {
"id": 53893572,
"name": "18.04.3 (LTS) x64",
"distribution": "Ubuntu",
"slug": "ubuntu-18-04-x64",
"public": true,
"regions": [
"nyc3",
"nyc1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1",
"sfo2",
"blr1",
"sfo3"
],
"created_at": "2019-10-22T01:38:19Z",
"min_disk_size": 20,
"type": "base",
"size_gigabytes": 2.36,
"description": "Ubuntu 18.04 x64 20191022",
"tags": [],
"status": "available"
},
"volume_ids": [],
"size": {
"slug": "s-1vcpu-1gb",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"transfer": 1,
"price_monthly": 5,
"price_hourly": 0.00744,
"regions": [
"ams2",
"ams3",
"blr1",
"fra1",
"lon1",
"nyc1",
"nyc2",
"nyc3",
"sfo1",
"sfo2",
"sfo3",
"sgp1",
"tor1"
],
"available": true
},
"size_slug": "s-1vcpu-1gb",
"networks": {
"v4": [
{
"ip_address": "167.172.111.118",
"netmask": "255.255.240.0",
"gateway": "167.172.176.1",
"type": "public"
},
{
"ip_address": "10.135.64.211",
"netmask": "255.255.0.0",
"gateway": "10.135.0.1",
"type": "private"
}
],
"v6": [
]
},
"region": {
"name": "Frankfurt 1",
"slug": "fra1",
"features": [
"private_networking",
"backups",
"ipv6",
"metadata",
"install_agent",
"storage",
"image_transfer"
],
"available": true,
"sizes": [
"s-1vcpu-1gb",
"512mb",
"s-1vcpu-2gb",
"1gb",
"s-3vcpu-1gb",
"s-2vcpu-2gb",
"s-1vcpu-3gb",
"s-2vcpu-4gb",
"2gb",
"s-4vcpu-8gb",
"m-1vcpu-8gb",
"c-2",
"4gb",
"g-2vcpu-8gb",
"gd-2vcpu-8gb",
"m-16gb",
"s-6vcpu-16gb",
"c-4",
"8gb",
"m-2vcpu-16gb",
"m3-2vcpu-16gb",
"g-4vcpu-16gb",
"gd-4vcpu-16gb",
"m6-2vcpu-16gb",
"m-32gb",
"s-8vcpu-32gb",
"c-8",
"16gb",
"m-4vcpu-32gb",
"m3-4vcpu-32gb",
"g-8vcpu-32gb",
"s-12vcpu-48gb",
"gd-8vcpu-32gb",
"m6-4vcpu-32gb",
"m-64gb",
"s-16vcpu-64gb",
"c-16",
"32gb",
"m-8vcpu-64gb",
"m3-8vcpu-64gb",
"g-16vcpu-64gb",
"s-20vcpu-96gb",
"48gb",
"gd-16vcpu-64gb",
"m6-8vcpu-64gb",
"m-128gb",
"s-24vcpu-128gb",
"c-32",
"64gb",
"m-16vcpu-128gb",
"m3-16vcpu-128gb",
"s-32vcpu-192gb",
"m-24vcpu-192gb",
"m-224gb",
"m6-16vcpu-128gb",
"m3-24vcpu-192gb",
"m6-24vcpu-192gb"
]
},
"tags": []
},
{
"id": 176011507,
"name": "prometheus-demo",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"locked": false,
"status": "active",
"kernel": null,
"created_at": "2020-01-17T12:06:26Z",
"features": [
"ipv6",
"private_networking"
],
"backup_ids": [],
"next_backup_window": null,
"snapshot_ids": [],
"image": {
"id": 53893572,
"name": "18.04.3 (LTS) x64",
"distribution": "Ubuntu",
"slug": "ubuntu-18-04-x64",
"public": true,
"regions": [
"nyc3",
"nyc1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1",
"sfo2",
"blr1",
"sfo3"
],
"created_at": "2019-10-22T01:38:19Z",
"min_disk_size": 20,
"type": "base",
"size_gigabytes": 2.36,
"description": "Ubuntu 18.04 x64 20191022",
"tags": [],
"status": "available"
},
"volume_ids": [],
"size": {
"slug": "s-1vcpu-1gb",
"memory": 1024,
"vcpus": 1,
"disk": 25,
"transfer": 1,
"price_monthly": 5,
"price_hourly": 0.00744,
"regions": [
"ams2",
"ams3",
"blr1",
"fra1",
"lon1",
"nyc1",
"nyc2",
"nyc3",
"sfo1",
"sfo2",
"sfo3",
"sgp1",
"tor1"
],
"available": true
},
"size_slug": "s-1vcpu-1gb",
"networks": {
"v4": [
{
"ip_address": "138.65.56.69",
"netmask": "255.255.240.0",
"gateway": "138.65.64.1",
"type": "public"
},
{
"ip_address": "154.245.26.111",
"netmask": "255.255.252.0",
"gateway": "154.245.24.1",
"type": "public"
},
{
"ip_address": "10.135.64.212",
"netmask": "255.255.0.0",
"gateway": "10.135.0.1",
"type": "private"
}
],
"v6": [
{
"ip_address": "2a03:b0c0:3:f0::cf2:4",
"netmask": 64,
"gateway": "2a03:b0c0:3:f0::1",
"type": "public"
}
]
},
"region": {
"name": "Frankfurt 1",
"slug": "fra1",
"features": [
"private_networking",
"backups",
"ipv6",
"metadata",
"install_agent",
"storage",
"image_transfer"
],
"available": true,
"sizes": [
"s-1vcpu-1gb",
"512mb",
"s-1vcpu-2gb",
"1gb",
"s-3vcpu-1gb",
"s-2vcpu-2gb",
"s-1vcpu-3gb",
"s-2vcpu-4gb",
"2gb",
"s-4vcpu-8gb",
"m-1vcpu-8gb",
"c-2",
"4gb",
"g-2vcpu-8gb",
"gd-2vcpu-8gb",
"m-16gb",
"s-6vcpu-16gb",
"c-4",
"8gb",
"m-2vcpu-16gb",
"m3-2vcpu-16gb",
"g-4vcpu-16gb",
"gd-4vcpu-16gb",
"m6-2vcpu-16gb",
"m-32gb",
"s-8vcpu-32gb",
"c-8",
"16gb",
"m-4vcpu-32gb",
"m3-4vcpu-32gb",
"g-8vcpu-32gb",
"s-12vcpu-48gb",
"gd-8vcpu-32gb",
"m6-4vcpu-32gb",
"m-64gb",
"s-16vcpu-64gb",
"c-16",
"32gb",
"m-8vcpu-64gb",
"m3-8vcpu-64gb",
"g-16vcpu-64gb",
"s-20vcpu-96gb",
"48gb",
"gd-16vcpu-64gb",
"m6-8vcpu-64gb",
"m-128gb",
"s-24vcpu-128gb",
"c-32",
"64gb",
"m-16vcpu-128gb",
"m3-16vcpu-128gb",
"s-32vcpu-192gb",
"m-24vcpu-192gb",
"m-224gb",
"m6-16vcpu-128gb",
"m3-24vcpu-192gb",
"m6-24vcpu-192gb"
]
},
"tags": []
}
],
"links": {
"pages": {
"first": "https://api.digitalocean.com/v2/droplets?page=1&per_page=2",
"prev": "https://api.digitalocean.com/v2/droplets?page=1&per_page=2"
}
},
"meta": {
"total": 4
}
}
`,
}[page-1],
)
})
}

discovery/ec2/ec2.go

@@ -38,6 +38,7 @@ import (
 const (
 	ec2Label           = model.MetaLabelPrefix + "ec2_"
+	ec2LabelAMI        = ec2Label + "ami"
 	ec2LabelAZ         = ec2Label + "availability_zone"
 	ec2LabelArch       = ec2Label + "architecture"
 	ec2LabelInstanceID = ec2Label + "instance_id"
@@ -212,6 +213,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 				labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName)
 			}

+			labels[ec2LabelAMI] = model.LabelValue(*inst.ImageId)
 			labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
 			labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name)
 			labels[ec2LabelInstanceType] = model.LabelValue(*inst.InstanceType)

discovery/kubernetes/kubernetes_test.go

@@ -107,14 +107,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 // readResultWithTimeout reads all targetgroups from channel with timeout.
 // It merges targetgroups by source and sends the result to result channel.
 func readResultWithTimeout(t *testing.T, ch <-chan []*targetgroup.Group, max int, timeout time.Duration, resChan chan<- map[string]*targetgroup.Group) {
-	allTgs := make([][]*targetgroup.Group, 0)
+	res := make(map[string]*targetgroup.Group)
 Loop:
 	for {
 		select {
 		case tgs := <-ch:
-			allTgs = append(allTgs, tgs)
-			if len(allTgs) == max {
+			for _, tg := range tgs {
+				if tg == nil {
+					continue
+				}
+				res[tg.Source] = tg
+			}
+			if len(res) == max {
 				// Reached max target groups we may get, break fast.
 				break Loop
 			}
@@ -122,21 +126,11 @@ Loop:
 			// Because we use queue, an object that is created then
 			// deleted or updated may be processed only once.
 			// So possibly we may skip events, timed out here.
-			t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(allTgs), max)
+			t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
 			break Loop
 		}
 	}

-	// Merge by source and sent it to channel.
-	res := make(map[string]*targetgroup.Group)
-	for _, tgs := range allTgs {
-		for _, tg := range tgs {
-			if tg == nil {
-				continue
-			}
-			res[tg.Source] = tg
-		}
-	}
 	resChan <- res
 }

discovery/manager.go

@@ -29,6 +29,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/azure"
 	"github.com/prometheus/prometheus/discovery/consul"
+	"github.com/prometheus/prometheus/discovery/digitalocean"
 	"github.com/prometheus/prometheus/discovery/dns"
 	"github.com/prometheus/prometheus/discovery/ec2"
 	"github.com/prometheus/prometheus/discovery/file"
@@ -369,6 +370,11 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
 			return consul.NewDiscovery(c, log.With(m.logger, "discovery", "consul"))
 		})
 	}
+	for _, c := range cfg.DigitalOceanSDConfigs {
+		add(c, func() (Discoverer, error) {
+			return digitalocean.NewDiscovery(c, log.With(m.logger, "discovery", "digitalocean"))
+		})
+	}
 	for _, c := range cfg.MarathonSDConfigs {
 		add(c, func() (Discoverer, error) {
 			return marathon.NewDiscovery(*c, log.With(m.logger, "discovery", "marathon"))
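
Each new SD mechanism plugs into the manager the same way: a constructor returning a Discoverer is registered per config section. The contract, reproduced here as a sketch of the interface this file defines:

```go
package discovery

import (
	"context"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Discoverer is what a constructor passed to add must return: a provider
// that pushes target-group updates into up until ctx is canceled.
type Discoverer interface {
	Run(ctx context.Context, up chan<- []*targetgroup.Group)
}
```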

docs/configuration/configuration.md

@@ -173,7 +173,7 @@ basic_auth:

 # Sets the `Authorization` header on every scrape request with the bearer token
 # read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: /path/to/bearer/token/file ]
+[ bearer_token_file: <filename> ]

 # Configures the scrape request's TLS settings.
 tls_config:
@@ -190,6 +190,10 @@ azure_sd_configs:
 consul_sd_configs:
   [ - <consul_sd_config> ... ]

+# List of DigitalOcean service discovery configurations.
+digitalocean_sd_configs:
+  [ - <digitalocean_sd_config> ... ]
+
 # List of DNS service discovery configurations.
 dns_sd_configs:
   [ - <dns_sd_config> ... ]
@@ -386,6 +390,60 @@ users with thousands of services it can be more efficient to use the Consul API
 directly which has basic support for filtering nodes (currently by node
 metadata and a single tag).

+### `<digitalocean_sd_config>`
+
+DigitalOcean SD configurations allow retrieving scrape targets from [DigitalOcean's](https://www.digitalocean.com/)
+Droplets API.
+
+This service discovery uses the public IPv4 address by default, but that can be
+changed with relabelling, as demonstrated in [the Prometheus digitalocean-sd
+configuration file](/documentation/examples/prometheus-digitalocean.yml).
+
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+* `__meta_digitalocean_droplet_id`: the id of the droplet
+* `__meta_digitalocean_droplet_name`: the name of the droplet
+* `__meta_digitalocean_image`: the image name of the droplet
+* `__meta_digitalocean_private_ipv4`: the private IPv4 of the droplet
+* `__meta_digitalocean_public_ipv4`: the public IPv4 of the droplet
+* `__meta_digitalocean_public_ipv6`: the public IPv6 of the droplet
+* `__meta_digitalocean_region`: the region of the droplet
+* `__meta_digitalocean_size`: the size of the droplet
+* `__meta_digitalocean_status`: the status of the droplet
+* `__meta_digitalocean_features`: the comma-separated list of features of the droplet
+* `__meta_digitalocean_tags`: the comma-separated list of tags of the droplet
+
+```yaml
+# Authentication information used to authenticate to the API server.
+# Note that `basic_auth`, `bearer_token` and `bearer_token_file` options are
+# mutually exclusive.
+# password and password_file are mutually exclusive.
+
+# Optional HTTP basic authentication information, not currently supported by DigitalOcean.
+basic_auth:
+  [ username: <string> ]
+  [ password: <secret> ]
+  [ password_file: <string> ]
+
+# Optional bearer token authentication information.
+[ bearer_token: <secret> ]
+
+# Optional bearer token file authentication information.
+[ bearer_token_file: <filename> ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+
+# TLS configuration.
+tls_config:
+  [ <tls_config> ]
+
+# The port to scrape metrics from.
+[ port: <int> | default = 80 ]
+
+# The time after which the droplets are refreshed.
+[ refresh_interval: <duration> | default = 60s ]
+```
+
 ### `<dns_sd_config>`

 A DNS-based service discovery configuration allows specifying a set of DNS
@@ -426,6 +484,7 @@ the public IP address with relabeling.

 The following meta labels are available on targets during [relabeling](#relabel_config):

+* `__meta_ec2_ami`: the EC2 Amazon Machine Image
 * `__meta_ec2_architecture`: the architecture of the instance
 * `__meta_ec2_availability_zone`: the availability zone in which the instance is running
 * `__meta_ec2_instance_id`: the EC2 instance ID
@@ -941,7 +1000,7 @@ basic_auth:

 # Sets the `Authorization` header on every request with the bearer token
 # read from the configured file. It is mutually exclusive with `bearer_token` and other authentication mechanisms.
 # NOTE: The current version of DC/OS marathon (v1.11.0) does not support standard Bearer token authentication. Use `auth_token_file` instead.
-[ bearer_token_file: /path/to/bearer/token/file ]
+[ bearer_token_file: <filename> ]

 # TLS configuration for connecting to marathon servers
 tls_config:
@@ -1236,7 +1295,7 @@ basic_auth:

 # Sets the `Authorization` header on every request with the bearer token
 # read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: /path/to/bearer/token/file ]
+[ bearer_token_file: <filename> ]

 # Configures the scrape request's TLS settings.
 tls_config:
@@ -1337,7 +1396,7 @@ basic_auth:

 # Sets the `Authorization` header on every remote write request with the bearer token
 # read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: /path/to/bearer/token/file ]
+[ bearer_token_file: <filename> ]

 # Configures the remote write request's TLS settings.
 tls_config:
@@ -1409,7 +1468,7 @@ basic_auth:

 # Sets the `Authorization` header on every remote read request with the bearer token
 # read from the configured file. It is mutually exclusive with `bearer_token`.
-[ bearer_token_file: /path/to/bearer/token/file ]
+[ bearer_token_file: <filename> ]

 # Configures the remote read request's TLS settings.
 tls_config:

docs/storage.md

@@ -63,7 +63,7 @@ Prometheus has several flags that allow configuring the local storage. The most

 * `--storage.tsdb.retention.time`: This determines when to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
 * `--storage.tsdb.retention.size`: [EXPERIMENTAL] This determines the maximum number of bytes that storage blocks can use (note that this does not include the WAL size, which can be substantial). The oldest data will be removed first. Defaults to `0` or disabled. This flag is experimental and can be changed in future releases. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB"
 * `--storage.tsdb.retention`: This flag has been deprecated in favour of `storage.tsdb.retention.time`.
-* `--storage.tsdb.wal-compression`: This flag enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. Note that if you enable this flag and subsequently downgrade Prometheus to a version below 2.11.0 you will need to delete your WAL as it will be unreadable.
+* `--storage.tsdb.wal-compression`: This flag enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL.

 On average, Prometheus uses only around 1-2 bytes per sample. Thus, to plan the capacity of a Prometheus server, you can use the rough formula:
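
The rough formula referenced here multiplies retention time by the ingestion rate and bytes per sample. A worked example at the upper end of that estimate (the 100k samples/s rate is an assumed input, not a recommendation):

```go
package main

import "fmt"

func main() {
	// needed_disk ≈ retention_seconds * samples_per_second * bytes_per_sample
	retention := 15 * 24 * 3600.0 // 15d of retention, in seconds
	rate := 100_000.0             // assumed ingested samples per second
	bytesPerSample := 2.0         // upper end of the 1-2 bytes/sample estimate

	needed := retention * rate * bytesPerSample
	fmt.Printf("%.1f GB\n", needed/1e9) // ≈ 259.2 GB
}
```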

documentation/examples/prometheus-digitalocean.yml (new file)

@@ -0,0 +1,25 @@
# A example scrape configuration for running Prometheus with
# DigitalOcean.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
# Discover Node Exporter instances to scrape.
- job_name: 'node'
digitalocean_sd_configs:
- bearer_token: "<replace with a Personal Access Token>"
relabel_configs:
# Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_digitalocean_tags]
regex: '.*,monitoring,.*'
action: keep
# Use the public IPv6 address and port 9100 to scrape the target.
- source_labels: [__meta_digitalocean_public_ipv6]
target_label: __address__
replacement: '[$1]:9100'
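A quick illustration of the `keep` rule above: Prometheus fully anchors relabel regexes, and the rule assumes (as this example implies) that the tags meta label is exposed as one comma-separated string with surrounding separators, e.g. `",web,monitoring,prod,"`. A hedged sketch of the matching behaviour:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Prometheus anchors relabel regexes, equivalent to wrapping in ^(?:...)$.
	re := regexp.MustCompile(`^(?:.*,monitoring,.*)$`)

	fmt.Println(re.MatchString(",web,monitoring,prod,")) // true  -> target kept
	fmt.Println(re.MatchString(",web,prod,"))            // false -> target dropped
}
```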


@ -10,12 +10,12 @@ To use them, you need to have `jsonnet` (v0.13+) and `jb` installed. If you
have a working Go development environment, it's easiest to run the following:

```bash
$ go get github.com/google/go-jsonnet/cmd/jsonnet
+$ go get github.com/google/go-jsonnet/cmd/jsonnetfmt
$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
```

_Note: The make targets `lint` and `fmt` need the `jsonnetfmt` binary, which is
-currently not included in the Go implementation of `jsonnet`. For the time
-being, you have to install the [C++ version of
+available from [v.0.16.0](https://github.com/google/jsonnet/releases/tag/v0.16.0) in the Go implementation of `jsonnet`. If your jsonnet version is older than 0.16.0 you have to either upgrade or install the [C++ version of
jsonnetfmt](https://github.com/google/jsonnet) if you want to use `make lint`
or `make fmt`._

go.mod

@ -14,6 +14,7 @@ require (
	github.com/cespare/xxhash v1.1.0
	github.com/davecgh/go-spew v1.1.1
	github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b
+	github.com/digitalocean/godo v1.37.0
	github.com/edsrzf/mmap-go v1.0.0
	github.com/go-kit/kit v0.10.0
	github.com/go-logfmt/logfmt v0.5.0

go.sum

@ -150,6 +150,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b h1:Yqiad0+sloMPdd/0Fg22actpFx0dekpzt1xJmVNVkU0=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.37.0 h1:NEj5ne2cvLBHo1GJY1DNN/iEt9ipa72CMwwAjKEA530=
+github.com/digitalocean/godo v1.37.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
@ -362,6 +364,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=


@ -368,6 +368,27 @@ func TestLabels_Equal(t *testing.T) {
	}
}
func TestLabels_FromStrings(t *testing.T) {
labels := FromStrings("aaa", "111", "bbb", "222")
expected := Labels{
{
Name: "aaa",
Value: "111",
},
{
Name: "bbb",
Value: "222",
},
}
testutil.Equals(t, expected, labels, "unexpected labelset")
defer func() { recover() }()
FromStrings("aaa", "111", "bbb")
testutil.Assert(t, false, "did not panic as expected")
}
func TestLabels_Compare(t *testing.T) {
	labels := Labels{
		{
@ -531,3 +552,80 @@ func TestLabels_Copy(t *testing.T) {
func TestLabels_Map(t *testing.T) {
	testutil.Equals(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
}
func TestLabels_WithLabels(t *testing.T) {
testutil.Equals(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithLabels("aaa", "bbb"))
}
func TestLabels_WithoutLabels(t *testing.T) {
testutil.Equals(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithoutLabels("bbb", "ccc"))
testutil.Equals(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {MetricName, "333"}}.WithoutLabels("bbb"))
}
func TestBuilder_NewBuilder(t *testing.T) {
testutil.Equals(
t,
&Builder{
base: Labels{{"aaa", "111"}},
del: []string{},
add: []Label{},
},
NewBuilder(Labels{{"aaa", "111"}}),
)
}
func TestBuilder_Del(t *testing.T) {
testutil.Equals(
t,
&Builder{
del: []string{"bbb"},
add: []Label{{"aaa", "111"}, {"ccc", "333"}},
},
(&Builder{
del: []string{},
add: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
}).Del("bbb"),
)
}
func TestBuilder_Set(t *testing.T) {
testutil.Equals(
t,
&Builder{
base: Labels{{"aaa", "111"}},
del: []string{},
add: []Label{{"bbb", "222"}},
},
(&Builder{
base: Labels{{"aaa", "111"}},
del: []string{},
add: []Label{},
}).Set("bbb", "222"),
)
testutil.Equals(
t,
&Builder{
base: Labels{{"aaa", "111"}},
del: []string{},
add: []Label{{"bbb", "333"}},
},
(&Builder{
base: Labels{{"aaa", "111"}},
del: []string{},
add: []Label{{"bbb", "222"}},
}).Set("bbb", "333"),
)
}
func TestBuilder_Labels(t *testing.T) {
testutil.Equals(
t,
Labels{{"aaa", "111"}, {"ccc", "333"}, {"ddd", "444"}},
(&Builder{
base: Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
del: []string{"bbb"},
add: []Label{{"ddd", "444"}},
}).Labels(),
)
}


@ -29,7 +29,7 @@ import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
-	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
@ -418,7 +418,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
//
// At this point per query only one EvalStmt is evaluated. Alert and record
// statements are not handled by the Engine.
-func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, w storage.Warnings, err error) {
+func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) {
	ng.metrics.currentQueries.Inc()
	defer ng.metrics.currentQueries.Dec()
@ -517,13 +517,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
	}
	defer querier.Close()

-	warnings, err := ng.populateSeries(ctxPrepare, querier, s)
+	ng.populateSeries(querier, s)
	prepareSpanTimer.Finish()

-	if err != nil {
-		return nil, warnings, err
-	}
-
	evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
	// Instant evaluation. This is executed as a range evaluation with one step.
	if s.Start == s.End && s.Interval == 0 {
@ -539,7 +535,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
		lookbackDelta:       ng.lookbackDelta,
	}

-	val, err := evaluator.Eval(s.Expr)
+	val, warnings, err := evaluator.Eval(s.Expr)
	if err != nil {
		return nil, warnings, err
	}
@ -588,7 +584,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
		logger:              ng.logger,
		lookbackDelta:       ng.lookbackDelta,
	}
-	val, err := evaluator.Eval(s.Expr)
+	val, warnings, err := evaluator.Eval(s.Expr)
	if err != nil {
		return nil, warnings, err
	}
@ -649,19 +645,15 @@ func (ng *Engine) findMinTime(s *parser.EvalStmt) time.Time {
	return s.Start.Add(-maxOffset)
}

-func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) (storage.Warnings, error) {
-	var (
-		// Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
-		// The evaluation of the VectorSelector inside then evaluates the given range and unsets
-		// the variable.
-		evalRange time.Duration
-		warnings  storage.Warnings
-		err       error
-	)
+func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) {
+	// Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
+	// The evaluation of the VectorSelector inside then evaluates the given range and unsets
+	// the variable.
+	var evalRange time.Duration

	parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error {
-		var set storage.SeriesSet
-		var wrn storage.Warnings
-		hints := &storage.SelectHints{
+		switch n := node.(type) {
+		case *parser.VectorSelector:
+			hints := &storage.SelectHints{
				Start: timestamp.FromTime(s.Start),
				End:   timestamp.FromTime(s.End),
@ -676,8 +668,6 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
			offsetMilliseconds := durationMilliseconds(subqOffset)
			hints.Start = hints.Start - offsetMilliseconds

-		switch n := node.(type) {
-		case *parser.VectorSelector:
			if evalRange == 0 {
				hints.Start = hints.Start - durationMilliseconds(ng.lookbackDelta)
			} else {
@ -696,20 +686,12 @@ func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s
				hints.End = hints.End - offsetMilliseconds
			}

-			set, wrn, err = querier.Select(false, hints, n.LabelMatchers...)
-			warnings = append(warnings, wrn...)
-			if err != nil {
-				level.Error(ng.logger).Log("msg", "error selecting series set", "err", err)
-				return err
-			}
-			n.UnexpandedSeriesSet = set
+			n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...)
		case *parser.MatrixSelector:
			evalRange = n.Range
		}
		return nil
	})
-	return warnings, err
}
// extractFuncFromPath walks up the path and searches for the first instance of // extractFuncFromPath walks up the path and searches for the first instance of
@ -743,34 +725,40 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) {
	return false, nil
}

-func checkForSeriesSetExpansion(ctx context.Context, expr parser.Expr) {
+func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) {
	switch e := expr.(type) {
	case *parser.MatrixSelector:
-		checkForSeriesSetExpansion(ctx, e.VectorSelector)
+		return checkAndExpandSeriesSet(ctx, e.VectorSelector)
	case *parser.VectorSelector:
-		if e.Series == nil {
-			series, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
-			if err != nil {
-				panic(err)
-			} else {
-				e.Series = series
-			}
+		if e.Series != nil {
+			return nil, nil
		}
+		series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet)
+		e.Series = series
+		return ws, err
	}
+	return nil, nil
}

-func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, err error) {
+func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) {
	for it.Next() {
		select {
		case <-ctx.Done():
-			return nil, ctx.Err()
+			return nil, nil, ctx.Err()
		default:
		}
		res = append(res, it.At())
	}
-	return res, it.Err()
+	return res, it.Warnings(), it.Err()
}
type errWithWarnings struct {
err error
warnings storage.Warnings
}
func (e errWithWarnings) Error() string { return e.err.Error() }
// An evaluator evaluates given expressions over given fixed timestamps. It
// is attached to an engine through which it connects to a querier and reports
// errors. On timeout or cancellation of its context it terminates.
@ -799,26 +787,33 @@ func (ev *evaluator) error(err error) {
}

// recover is the handler that turns panics into returns from the top level of evaluation.
-func (ev *evaluator) recover(errp *error) {
+func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
	e := recover()
	if e == nil {
		return
	}
-	if err, ok := e.(runtime.Error); ok {
+
+	switch err := e.(type) {
+	case runtime.Error:
		// Print the stack trace but do not inhibit the running application.
		buf := make([]byte, 64<<10)
		buf = buf[:runtime.Stack(buf, false)]

		level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf))
		*errp = errors.Wrap(err, "unexpected error")
-	} else {
+	case errWithWarnings:
+		*errp = err.err
+		*ws = append(*ws, err.warnings...)
+	default:
		*errp = e.(error)
	}
}

-func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, err error) {
-	defer ev.recover(&err)
-	return ev.eval(expr), nil
+func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) {
	defer ev.recover(&ws, &err)

+	v, ws = ev.eval(expr)
+	return v, ws, nil
}
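The pattern above is worth isolating: deep inside evaluation, an error that has already accumulated warnings is thrown with `panic` and converted back into ordinary `(warnings, error)` return values at the top level. A minimal, self-contained sketch of the same mechanism (hypothetical helper names, not the engine's real ones):

```go
package main

import (
	"errors"
	"fmt"
)

type warnings []error

// errWithWarnings mirrors the shape used above: an error that also carries
// the warnings collected before evaluation aborted.
type errWithWarnings struct {
	err      error
	warnings warnings
}

func (e errWithWarnings) Error() string { return e.err.Error() }

// evalDeep stands in for a deeply nested evaluation step that aborts via panic.
func evalDeep() {
	panic(errWithWarnings{
		err:      errors.New("expanding series failed"),
		warnings: warnings{errors.New("partial data from secondary store")},
	})
}

// run converts the panic back into ordinary (warnings, error) return values.
func run() (ws warnings, err error) {
	defer func() {
		switch v := recover().(type) {
		case nil:
			// no panic occurred
		case errWithWarnings:
			err = v.err
			ws = append(ws, v.warnings...)
		default:
			err = v.(error)
		}
	}()
	evalDeep()
	return ws, nil
}

func main() {
	ws, err := run()
	fmt.Println(ws, err) // [partial data from secondary store] expanding series failed
}
```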
// EvalNodeHelper stores extra information and caches for evaluating a single node across steps. // EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
@ -884,17 +879,20 @@ func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.L
// the given function with the values computed for each expression at that // the given function with the values computed for each expression at that
// step. The return value is the combination into time series of all the // step. The return value is the combination into time series of all the
// function call results. // function call results.
func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) Vector, exprs ...parser.Expr) Matrix { func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs)) matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs))
originalNumSamples := ev.currentSamples originalNumSamples := ev.currentSamples
var warnings storage.Warnings
for i, e := range exprs { for i, e := range exprs {
// Functions will take string arguments from the expressions, not the values. // Functions will take string arguments from the expressions, not the values.
if e != nil && e.Type() != parser.ValueTypeString { if e != nil && e.Type() != parser.ValueTypeString {
// ev.currentSamples will be updated to the correct value within the ev.eval call. // ev.currentSamples will be updated to the correct value within the ev.eval call.
matrixes[i] = ev.eval(e).(Matrix) val, ws := ev.eval(e)
warnings = append(warnings, ws...)
matrixes[i] = val.(Matrix)
// Keep a copy of the original point slices so that they // Keep a copy of the original point slices so that they
// can be returned to the pool. // can be returned to the pool.
@ -946,11 +944,12 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) Vector, e
} }
// Make the function call. // Make the function call.
enh.ts = ts enh.ts = ts
result := f(args, enh) result, ws := f(args, enh)
if result.ContainsSameLabelset() { if result.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset") ev.errorf("vector cannot contain metrics with the same labelset")
} }
enh.out = result[:0] // Reuse result vector. enh.out = result[:0] // Reuse result vector.
warnings = append(warnings, ws...)
ev.currentSamples += len(result) ev.currentSamples += len(result)
// When we reset currentSamples to tempNumSamples during the next iteration of the loop it also // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also
@ -969,7 +968,7 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) Vector, e
mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}}
} }
ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.currentSamples = originalNumSamples + mat.TotalSamples()
return mat return mat, warnings
} }
// Add samples in output vector to output series. // Add samples in output vector to output series.
@ -1001,29 +1000,30 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) Vector, e
mat = append(mat, ss) mat = append(mat, ss)
} }
ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.currentSamples = originalNumSamples + mat.TotalSamples()
return mat return mat, warnings
} }
// evalSubquery evaluates given SubqueryExpr and returns an equivalent // evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) *parser.MatrixSelector { func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, storage.Warnings) {
val := ev.eval(subq).(Matrix) val, ws := ev.eval(subq)
mat := val.(Matrix)
vs := &parser.VectorSelector{ vs := &parser.VectorSelector{
Offset: subq.Offset, Offset: subq.Offset,
Series: make([]storage.Series, 0, len(val)), Series: make([]storage.Series, 0, len(mat)),
} }
ms := &parser.MatrixSelector{ ms := &parser.MatrixSelector{
Range: subq.Range, Range: subq.Range,
VectorSelector: vs, VectorSelector: vs,
} }
for _, s := range val { for _, s := range mat {
vs.Series = append(vs.Series, NewStorageSeries(s)) vs.Series = append(vs.Series, NewStorageSeries(s))
} }
return ms return ms, ws
} }
// eval evaluates the given expression as the given AST expression node requires. // eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr parser.Expr) parser.Value { func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
// This is the top-level evaluation method. // This is the top-level evaluation method.
// Thus, we check for timeout/cancellation here. // Thus, we check for timeout/cancellation here.
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
@ -1035,16 +1035,16 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
case *parser.AggregateExpr: case *parser.AggregateExpr:
unwrapParenExpr(&e.Param) unwrapParenExpr(&e.Param)
if s, ok := e.Param.(*parser.StringLiteral); ok { if s, ok := e.Param.(*parser.StringLiteral); ok {
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh) return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh), nil
}, e.Expr) }, e.Expr)
} }
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
var param float64 var param float64
if e.Param != nil { if e.Param != nil {
param = v[0].(Vector)[0].V param = v[0].(Vector)[0].V
} }
return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh) return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh), nil
}, e.Param, e.Expr) }, e.Param, e.Expr)
case *parser.Call: case *parser.Call:
@ -1056,15 +1056,19 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
// a vector selector. // a vector selector.
vs, ok := e.Args[0].(*parser.VectorSelector) vs, ok := e.Args[0].(*parser.VectorSelector)
if ok { if ok {
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return call([]parser.Value{ev.vectorSelector(vs, enh.ts)}, e.Args, enh) val, ws := ev.vectorSelector(vs, enh.ts)
return call([]parser.Value{val}, e.Args, enh), ws
}) })
} }
} }
// Check if the function has a matrix argument. // Check if the function has a matrix argument.
var matrixArgIndex int var (
var matrixArg bool matrixArgIndex int
matrixArg bool
warnings storage.Warnings
)
for i := range e.Args { for i := range e.Args {
unwrapParenExpr(&e.Args[i]) unwrapParenExpr(&e.Args[i])
a := e.Args[i] a := e.Args[i]
@ -1078,14 +1082,16 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
matrixArgIndex = i matrixArgIndex = i
matrixArg = true matrixArg = true
// Replacing parser.SubqueryExpr with parser.MatrixSelector. // Replacing parser.SubqueryExpr with parser.MatrixSelector.
e.Args[i] = ev.evalSubquery(subq) val, ws := ev.evalSubquery(subq)
e.Args[i] = val
warnings = append(warnings, ws...)
break break
} }
} }
if !matrixArg { if !matrixArg {
// Does not have a matrix argument. // Does not have a matrix argument.
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return call(v, e.Args, enh) return call(v, e.Args, enh), warnings
}, e.Args...) }, e.Args...)
} }
@ -1095,16 +1101,22 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
otherInArgs := make([]Vector, len(e.Args)) otherInArgs := make([]Vector, len(e.Args))
for i, e := range e.Args { for i, e := range e.Args {
if i != matrixArgIndex { if i != matrixArgIndex {
otherArgs[i] = ev.eval(e).(Matrix) val, ws := ev.eval(e)
otherArgs[i] = val.(Matrix)
otherInArgs[i] = Vector{Sample{}} otherInArgs[i] = Vector{Sample{}}
inArgs[i] = otherInArgs[i] inArgs[i] = otherInArgs[i]
warnings = append(warnings, ws...)
} }
} }
sel := e.Args[matrixArgIndex].(*parser.MatrixSelector) sel := e.Args[matrixArgIndex].(*parser.MatrixSelector)
selVS := sel.VectorSelector.(*parser.VectorSelector) selVS := sel.VectorSelector.(*parser.VectorSelector)
checkForSeriesSetExpansion(ev.ctx, sel) ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
warnings = append(warnings, ws...)
if err != nil {
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), warnings})
}
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix. mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
offset := durationMilliseconds(selVS.Offset) offset := durationMilliseconds(selVS.Offset)
selRange := durationMilliseconds(sel.Range) selRange := durationMilliseconds(sel.Range)
@ -1182,7 +1194,7 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
// Iterate once to look for a complete series. // Iterate once to look for a complete series.
for _, s := range mat { for _, s := range mat {
if len(s.Points) == steps { if len(s.Points) == steps {
return Matrix{} return Matrix{}, warnings
} }
} }
@ -1193,7 +1205,7 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
found[p.T] = struct{}{} found[p.T] = struct{}{}
} }
if i > 0 && len(found) == steps { if i > 0 && len(found) == steps {
return Matrix{} return Matrix{}, warnings
} }
} }
@ -1209,20 +1221,21 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
Metric: createLabelsForAbsentFunction(e.Args[0]), Metric: createLabelsForAbsentFunction(e.Args[0]),
Points: newp, Points: newp,
}, },
} }, warnings
} }
if mat.ContainsSameLabelset() { if mat.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset") ev.errorf("vector cannot contain metrics with the same labelset")
} }
return mat return mat, warnings
case *parser.ParenExpr: case *parser.ParenExpr:
return ev.eval(e.Expr) return ev.eval(e.Expr)
case *parser.UnaryExpr: case *parser.UnaryExpr:
mat := ev.eval(e.Expr).(Matrix) val, ws := ev.eval(e.Expr)
mat := val.(Matrix)
if e.Op == parser.SUB { if e.Op == parser.SUB {
for i := range mat { for i := range mat {
mat[i].Metric = dropMetricName(mat[i].Metric) mat[i].Metric = dropMetricName(mat[i].Metric)
@ -1234,53 +1247,56 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
ev.errorf("vector cannot contain metrics with the same labelset") ev.errorf("vector cannot contain metrics with the same labelset")
} }
} }
return mat return mat, ws
case *parser.BinaryExpr: case *parser.BinaryExpr:
switch lt, rt := e.LHS.Type(), e.RHS.Type(); { switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V)
return append(enh.out, Sample{Point: Point{V: val}}) return append(enh.out, Sample{Point: Point{V: val}}), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector:
switch e.Op { switch e.Op {
case parser.LAND: case parser.LAND:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case parser.LOR: case parser.LOR:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case parser.LUNLESS: case parser.LUNLESS:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
default: default:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh) return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
} }
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh) return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh) return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
} }
case *parser.NumberLiteral: case *parser.NumberLiteral:
return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) Vector { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) {
return append(enh.out, Sample{Point: Point{V: e.Val}}) return append(enh.out, Sample{Point: Point{V: e.Val}}), nil
}) })
case *parser.VectorSelector: case *parser.VectorSelector:
checkForSeriesSetExpansion(ev.ctx, e) ws, err := checkAndExpandSeriesSet(ev.ctx, e)
if err != nil {
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
mat := make(Matrix, 0, len(e.Series)) mat := make(Matrix, 0, len(e.Series))
it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta)) it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
for i, s := range e.Series { for i, s := range e.Series {
@ -1307,9 +1323,8 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
} else { } else {
putPointSlice(ss.Points) putPointSlice(ss.Points)
} }
} }
return mat return mat, ws
case *parser.MatrixSelector: case *parser.MatrixSelector:
if ev.startTimestamp != ev.endTimestamp { if ev.startTimestamp != ev.endTimestamp {
@ -1342,11 +1357,11 @@ func (ev *evaluator) eval(expr parser.Expr) parser.Value {
newEv.startTimestamp += newEv.interval newEv.startTimestamp += newEv.interval
} }
res := newEv.eval(e.Expr) res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples ev.currentSamples = newEv.currentSamples
return res return res, ws
case *parser.StringLiteral: case *parser.StringLiteral:
return String{V: e.Val, T: ev.startTimestamp} return String{V: e.Val, T: ev.startTimestamp}, nil
} }
panic(errors.Errorf("unhandled expression of type: %T", expr)) panic(errors.Errorf("unhandled expression of type: %T", expr))
@ -1357,13 +1372,12 @@ func durationToInt64Millis(d time.Duration) int64 {
} }
// vectorSelector evaluates a *parser.VectorSelector expression. // vectorSelector evaluates a *parser.VectorSelector expression.
func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) Vector { func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) {
checkForSeriesSetExpansion(ev.ctx, node) ws, err := checkAndExpandSeriesSet(ev.ctx, node)
if err != nil {
var ( ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
vec = make(Vector, 0, len(node.Series)) }
) vec := make(Vector, 0, len(node.Series))
it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta)) it := storage.NewBuffer(durationMilliseconds(ev.lookbackDelta))
for i, s := range node.Series { for i, s := range node.Series {
it.Reset(s.Iterator()) it.Reset(s.Iterator())
@ -1381,7 +1395,7 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) Vecto
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
} }
return vec return vec, ws
} }
// vectorSelectorSingle evaluates an instant vector for the iterator of one time series.
@ -1429,21 +1443,23 @@ func putPointSlice(p []Point) {
} }
// matrixSelector evaluates a *parser.MatrixSelector expression. // matrixSelector evaluates a *parser.MatrixSelector expression.
func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) Matrix { func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) {
checkForSeriesSetExpansion(ev.ctx, node)
vs := node.VectorSelector.(*parser.VectorSelector)
var ( var (
vs = node.VectorSelector.(*parser.VectorSelector)
offset = durationMilliseconds(vs.Offset) offset = durationMilliseconds(vs.Offset)
maxt = ev.startTimestamp - offset maxt = ev.startTimestamp - offset
mint = maxt - durationMilliseconds(node.Range) mint = maxt - durationMilliseconds(node.Range)
matrix = make(Matrix, 0, len(vs.Series)) matrix = make(Matrix, 0, len(vs.Series))
it = storage.NewBuffer(durationMilliseconds(node.Range))
) )
ws, err := checkAndExpandSeriesSet(ev.ctx, node)
if err != nil {
ev.error(errWithWarnings{errors.Wrap(err, "expanding series"), ws})
}
it := storage.NewBuffer(durationMilliseconds(node.Range))
series := vs.Series series := vs.Series
for i, s := range series { for i, s := range series {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err) ev.error(err)
@ -1461,7 +1477,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) Matrix {
putPointSlice(ss.Points) putPointSlice(ss.Points)
} }
} }
return matrix return matrix, ws
} }
// matrixIterSlice populates a matrix vector covering the requested range for a // matrixIterSlice populates a matrix vector covering the requested range for a


@ -19,7 +19,6 @@ import (
	"io/ioutil"
	"os"
	"sort"
-	"strings"
	"testing"
	"time"
@ -117,9 +116,7 @@ func TestQueryTimeout(t *testing.T) {
	testutil.NotOk(t, res.Err, "expected timeout error but got none")

	var e ErrQueryTimeout
-	// TODO: when circleci-windows moves to go 1.13:
-	// testutil.Assert(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err)
-	testutil.Assert(t, strings.HasPrefix(res.Err.Error(), e.Error()), "expected timeout error but got: %s", res.Err)
+	testutil.Assert(t, errors.As(res.Err, &e), "expected timeout error but got: %s", res.Err)
}

const errQueryCanceled = ErrQueryCanceled("test statement execution")
@ -175,8 +172,8 @@ type errQuerier struct {
	err error
}

-func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	return errSeriesSet{err: q.err}, nil, q.err
+func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
+	return errSeriesSet{err: q.err}
}
func (*errQuerier) LabelValues(string) ([]string, storage.Warnings, error) { return nil, nil, nil }
func (*errQuerier) LabelNames() ([]string, storage.Warnings, error)        { return nil, nil, nil }

@ -190,6 +187,7 @@ type errSeriesSet struct {
func (errSeriesSet) Next() bool         { return false }
func (errSeriesSet) At() storage.Series { return nil }
func (e errSeriesSet) Err() error       { return e.err }
+func (e errSeriesSet) Warnings() storage.Warnings { return nil }
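The added `Warnings()` method reflects the expanded iterator contract these tests now have to satisfy. A paraphrased sketch of its shape, with stand-in types, not the verbatim Prometheus definition:

```go
package storagesketch

// Series and Warnings are stand-ins for the real storage types.
type Series interface{}
type Warnings = []error

// SeriesSet paraphrases the iterator contract implied by the change above.
type SeriesSet interface {
	Next() bool         // advance; false once exhausted
	At() Series         // the current series
	Err() error         // terminal error, checked after iteration
	Warnings() Warnings // non-fatal warnings gathered along the way
}
```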
func TestQueryError(t *testing.T) {
	opts := EngineOpts{
@ -211,14 +209,14 @@ func TestQueryError(t *testing.T) {
	res := vectorQuery.Exec(ctx)
	testutil.NotOk(t, res.Err, "expected error on failed select but got none")
-	testutil.Equals(t, errStorage, res.Err)
+	testutil.Assert(t, errors.Is(res.Err, errStorage), "expected error doesn't match")

	matrixQuery, err := engine.NewInstantQuery(queryable, "foo[1m]", time.Unix(1, 0))
	testutil.Ok(t, err)

	res = matrixQuery.Exec(ctx)
	testutil.NotOk(t, res.Err, "expected error on failed select but got none")
-	testutil.Equals(t, errStorage, res.Err)
+	testutil.Assert(t, errors.Is(res.Err, errStorage), "expected error doesn't match")
}
// hintCheckerQuerier implements storage.Querier which checks the start and end times
@ -234,7 +232,7 @@ type hintCheckerQuerier struct {
	t *testing.T
}

-func (q *hintCheckerQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+func (q *hintCheckerQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
	testutil.Equals(q.t, q.start, sp.Start)
	testutil.Equals(q.t, q.end, sp.End)
	testutil.Equals(q.t, q.grouping, sp.Grouping)
@ -242,7 +240,7 @@ func (q *hintCheckerQuerier) Select(_ bool, sp *storage.SelectHints, _ ...*label
	testutil.Equals(q.t, q.selRange, sp.Range)
	testutil.Equals(q.t, q.function, sp.Func)

-	return errSeriesSet{err: nil}, nil, nil
+	return errSeriesSet{err: nil}
}
func (*hintCheckerQuerier) LabelValues(string) ([]string, storage.Warnings, error) {
	return nil, nil, nil
@ -499,9 +497,7 @@ func TestEngineShutdown(t *testing.T) {
	testutil.NotOk(t, res2.Err, "expected error on querying with canceled context but got none")

	var e ErrQueryCanceled
-	// TODO: when circleci-windows moves to go 1.13:
-	// testutil.Assert(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err)
-	testutil.Assert(t, strings.HasPrefix(res2.Err.Error(), e.Error()), "expected cancellation error but got: %s", res2.Err)
+	testutil.Assert(t, errors.As(res2.Err, &e), "expected cancellation error but got: %s", res2.Err)
}

func TestEngineEvalStmtTimestamps(t *testing.T) {
@ -599,9 +595,8 @@ load 10s
		}

		testutil.Ok(t, res.Err)
-		testutil.Equals(t, c.Result, res.Value)
+		testutil.Equals(t, c.Result, res.Value, "query %q failed", c.Query)
	}
}
func TestMaxQuerySamples(t *testing.T) {
@ -831,7 +826,7 @@ load 10s
		res := qry.Exec(test.Context())
		testutil.Equals(t, c.Result.Err, res.Err)
-		testutil.Equals(t, c.Result.Value, res.Value)
+		testutil.Equals(t, c.Result.Value, res.Value, "query %q failed", c.Query)
	}
}

@ -839,7 +834,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
	ev := &evaluator{logger: log.NewNopLogger()}

	var err error
-	defer ev.recover(&err)
+	defer ev.recover(nil, &err)

	// Cause a runtime panic.
	var a []int
@ -862,7 +857,31 @@ func TestRecoverEvaluatorError(t *testing.T) {
			t.Fatalf("wrong error message: %q, expected %q", err, e)
		}
	}()
-	defer ev.recover(&err)
+	defer ev.recover(nil, &err)
panic(e)
}
func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()}
var err error
var ws storage.Warnings
warnings := storage.Warnings{errors.New("custom warning")}
e := errWithWarnings{
err: errors.New("custom error"),
warnings: warnings,
}
defer func() {
if err.Error() != e.Error() {
t.Fatalf("wrong error message: %q, expected %q", err, e)
}
if len(ws) != len(warnings) && ws[0] != warnings[0] {
t.Fatalf("wrong warning message: %q, expected %q", ws[0], warnings[0])
}
}()
defer ev.recover(&ws, &err)
	panic(e)
}


@ -168,7 +168,7 @@ func coalesceBuckets(buckets buckets) buckets {
func ensureMonotonic(buckets buckets) {
	max := buckets[0].count
-	for i := range buckets[1:] {
+	for i := 1; i < len(buckets); i++ {
		switch {
		case buckets[i].count > max:
			max = buckets[i].count
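The fix above addresses a classic Go gotcha: ranging over `buckets[1:]` yields indices that start at 0 again, so `buckets[i]` revisits the first element and never reaches the last one. A minimal demonstration:

```go
package main

import "fmt"

func main() {
	buckets := []int{10, 20, 30}

	// Buggy form: the indices of buckets[1:] start at 0, so buckets[i]
	// re-reads the first element and skips the final one.
	for i := range buckets[1:] {
		fmt.Println("buggy:", buckets[i]) // 10, 20
	}

	// Fixed form, as in the change above: start the index at 1.
	for i := 1; i < len(buckets); i++ {
		fmt.Println("fixed:", buckets[i]) // 20, 30
	}
}
```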


@ -133,8 +133,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
			}

			// Get the series for the matcher.
-			ss, _, err := querier.Select(false, nil, matchers...)
-			testutil.Ok(t, err)
+			ss := querier.Select(false, nil, matchers...)
			testutil.Assert(t, ss.Next(), "")
			storageSeries := ss.At()
			testutil.Assert(t, !ss.Next(), "Expecting only 1 series")


@ -151,21 +151,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket
# A histogram with nonmonotonic bucket counts. This may happen when recording
# rule evaluation or federation races scrape ingestion, causing some buckets
-# counts to be derived from fewer samples. The wrong answer we want to avoid
-# is for histogram_quantile(0.99, nonmonotonic_bucket) to return ~1000 instead
-# of 1.
+# counts to be derived from fewer samples.
load 5m
-    nonmonotonic_bucket{le="0.1"}   0+1x10
-    nonmonotonic_bucket{le="1"}     0+9x10
-    nonmonotonic_bucket{le="10"}    0+8x10
-    nonmonotonic_bucket{le="100"}   0+8x10
+    nonmonotonic_bucket{le="0.1"}   0+2x10
+    nonmonotonic_bucket{le="1"}     0+1x10
+    nonmonotonic_bucket{le="10"}    0+5x10
+    nonmonotonic_bucket{le="100"}   0+4x10
    nonmonotonic_bucket{le="1000"}  0+9x10
-    nonmonotonic_bucket{le="+Inf"}  0+9x10
+    nonmonotonic_bucket{le="+Inf"}  0+8x10

# Nonmonotonic buckets
+eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket)
+    {} 0.0045
+
+eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket)
+    {} 8.5
+
eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
-    {} 0.989875
+    {} 979.75
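As a sanity check of the new expected output (assuming the usual monotonic coercion followed by linear interpolation within a bucket): at 50m the raw cumulative counts 20, 10, 50, 40, 90, 80 are coerced to 20, 20, 50, 50, 90, 90, so the 0.99 rank is 0.99 × 90 = 89.1 observations, which lands in the (100, 1000] bucket spanning counts 50 to 90:

    100 + (1000 - 100) * (89.1 - 50) / (90 - 50) = 979.75

The same arithmetic gives 0.0045 for the 0.01 quantile and 8.5 for the median.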
# Buckets with different representations of the same upper bound.
eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))


@ -707,12 +707,7 @@ func (g *Group) RestoreForState(ts time.Time) {
			matchers = append(matchers, mt)
		}

-		sset, err, _ := q.Select(false, nil, matchers...)
-		if err != nil {
-			level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
-				labels.AlertName, alertRule.Name(), "stage", "Select", "err", err)
-			return
-		}
+		sset := q.Select(false, nil, matchers...)

		seriesFound := false
		var s storage.Series
@ -727,6 +722,17 @@
			}
		}

+		if err := sset.Err(); err != nil {
+			// Querier Warnings are ignored. We do not care unless we have an error.
+			level.Error(g.logger).Log(
+				"msg", "Failed to restore 'for' state",
+				labels.AlertName, alertRule.Name(),
+				"stage", "Select",
+				"err", err,
+			)
+			return
+		}
+
		if !seriesFound {
			return
		}
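The change adopts the iterate-then-check idiom: `Select` no longer returns an error up front, so consumers drain the set first and inspect `Err()` afterwards. A minimal, self-contained sketch with hypothetical toy types, not the Prometheus API:

```go
package main

import (
	"errors"
	"fmt"
)

// seriesSet is a toy stand-in for storage.SeriesSet.
type seriesSet struct {
	items []string
	pos   int
	err   error
}

func (s *seriesSet) Next() bool {
	if s.pos >= len(s.items) {
		return false
	}
	s.pos++
	return true
}
func (s *seriesSet) At() string { return s.items[s.pos-1] }
func (s *seriesSet) Err() error { return s.err }

func main() {
	set := &seriesSet{items: []string{"a", "b"}, err: errors.New("select failed midway")}

	// Drain the iterator first...
	for set.Next() {
		fmt.Println("series:", set.At())
	}
	// ...then check for a deferred error, as RestoreForState now does.
	if err := set.Err(); err != nil {
		fmt.Println("restore aborted:", err)
	}
}
```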


@ -563,9 +563,7 @@ func TestStaleness(t *testing.T) {
	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
	testutil.Ok(t, err)

-	set, _, err := querier.Select(false, nil, matcher)
-	testutil.Ok(t, err)
+	set := querier.Select(false, nil, matcher)

	samples, err := readSeriesSet(set)
	testutil.Ok(t, err)
@ -686,9 +684,7 @@ func TestDeletedRuleMarkedStale(t *testing.T) {
	matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
	testutil.Ok(t, err)

-	set, _, err := querier.Select(false, nil, matcher)
-	testutil.Ok(t, err)
+	set := querier.Select(false, nil, matcher)

	samples, err := readSeriesSet(set)
	testutil.Ok(t, err)
@ -1107,9 +1103,7 @@ func countStaleNaN(t *testing.T, st storage.Storage) int {
	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_2")
	testutil.Ok(t, err)

-	set, _, err := querier.Select(false, nil, matcher)
-	testutil.Ok(t, err)
+	set := querier.Select(false, nil, matcher)

	samples, err := readSeriesSet(set)
	testutil.Ok(t, err)


@ -1140,7 +1140,7 @@ loop:
		if ok {
			err = app.AddFast(ce.ref, t, v)
-			sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
+			_, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
			// In theory this should never happen.
			if err == storage.ErrNotFound {
				ok = false
@ -1187,10 +1187,10 @@ loop:
			}
		}

-		// Increment added even if there's a sampleLimitErr so we correctly report the number of samples scraped.
-		if sampleAdded || sampleLimitErr != nil {
-			added++
-		}
+		// Increment added even if there's an error so we correctly report the
+		// number of samples remaining after relabelling.
+		added++
	}
	if sampleLimitErr != nil {
		if err == nil {
@ -1275,7 +1275,7 @@ const (
	scrapeSeriesAddedMetricName  = "scrape_series_added" + "\xff"
)

-func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, appended, seriesAdded int, scrapeErr error) (err error) {
+func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) {
	sl.scraper.Report(start, duration, scrapeErr)

	ts := timestamp.FromTime(start)
@ -1302,7 +1302,7 @@ func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, a
	if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped)); err != nil {
		return
	}
-	if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(appended)); err != nil {
+	if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added)); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil {


@ -1340,7 +1340,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
	}
	testutil.Equals(t, want, app.result, "Appended samples not as expected")
	testutil.Equals(t, 4, total)
-	testutil.Equals(t, 1, added)
+	testutil.Equals(t, 4, added)
	testutil.Equals(t, 1, seriesAdded)
}
@ -1365,7 +1365,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
	now := time.Now().Add(20 * time.Minute)
	total, added, seriesAdded, err := sl.append([]byte("normal 1\n"), "", now)
	testutil.Equals(t, 1, total)
-	testutil.Equals(t, 0, added)
+	testutil.Equals(t, 1, added)
	testutil.Equals(t, 0, seriesAdded)
	testutil.Ok(t, err)
@ -1617,9 +1617,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0) q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
testutil.Ok(t, err) testutil.Ok(t, err)
series, _, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
testutil.Ok(t, err)
testutil.Equals(t, false, series.Next(), "series found in tsdb") testutil.Equals(t, false, series.Next(), "series found in tsdb")
testutil.Ok(t, series.Err())
// We add a good metric to check that it is recorded. // We add a good metric to check that it is recorded.
_, _, _, err = sl.append([]byte("test_metric{le=\"500\"} 1\n"), "", time.Time{}) _, _, _, err = sl.append([]byte("test_metric{le=\"500\"} 1\n"), "", time.Time{})
@ -1627,9 +1627,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
q, err = s.Querier(ctx, time.Time{}.UnixNano(), 0) q, err = s.Querier(ctx, time.Time{}.UnixNano(), 0)
testutil.Ok(t, err) testutil.Ok(t, err)
series, _, err = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500")) series = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
testutil.Ok(t, err)
testutil.Equals(t, true, series.Next(), "series not found in tsdb") testutil.Equals(t, true, series.Next(), "series not found in tsdb")
testutil.Ok(t, series.Err())
testutil.Equals(t, false, series.Next(), "more than one series found in tsdb") testutil.Equals(t, false, series.Next(), "more than one series found in tsdb")
} }
@ -1663,9 +1663,9 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0) q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
testutil.Ok(t, err) testutil.Ok(t, err)
series, _, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
testutil.Ok(t, err)
testutil.Equals(t, false, series.Next(), "series found in tsdb") testutil.Equals(t, false, series.Next(), "series found in tsdb")
testutil.Ok(t, series.Err())
} }
func TestReusableConfig(t *testing.T) { func TestReusableConfig(t *testing.T) {


@ -7,10 +7,10 @@ set -uo pipefail
git_mail="prometheus-team@googlegroups.com"
git_user="prombot"
-branch="makefile_common"
-commit_msg="makefile: update Makefile.common with newer version"
-pr_title="Synchronize Makefile.common from prometheus/prometheus"
-pr_msg="Propagating changes from master Makefile.common located in prometheus/prometheus."
+branch="repo_sync"
+commit_msg="Update common Prometheus files"
+pr_title="Synchronize common files from prometheus/prometheus"
+pr_msg="Propagating changes from prometheus/prometheus default branch."
orgs="prometheus prometheus-community"

GITHUB_TOKEN="${GITHUB_TOKEN:-}"
@ -19,11 +19,13 @@ if [ -z "${GITHUB_TOKEN}" ]; then
	exit 1
fi

+# List of files that should be synced.
+SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common"
+
# Go to the root of the repo
cd "$(git rev-parse --show-cdup)" || exit 1

-source_makefile="$(pwd)/Makefile.common"
-source_checksum="$(sha256sum Makefile.common | cut -d' ' -f1)"
+source_dir="$(pwd)"

tmp_dir="$(mktemp -d)"
trap 'rm -rf "${tmp_dir}"' EXIT
@ -51,18 +53,42 @@ post_pull_request() {
"https://api.github.com/repos/${1}/pulls" "https://api.github.com/repos/${1}/pulls"
} }
check_license() {
# Check to see if the input is an Apache license of some kind
echo "$1" | grep --quiet --no-messages --ignore-case 'Apache License'
}
process_repo() {
	local org_repo="$1"
	echo -e "\e[32mAnalyzing '${org_repo}'\e[0m"

-	target_makefile="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/master/Makefile.common")"
-	if [ -z "${target_makefile}" ]; then
-		echo "Makefile.common doesn't exist in ${org_repo}"
-		return
-	fi
-	target_checksum="$(echo "${target_makefile}" | sha256sum | cut -d' ' -f1)"
-	if [ "${source_checksum}" == "${target_checksum}" ]; then
-		echo "Makefile.common is already in sync."
+	local needs_update=()
+	for source_file in ${SYNC_FILES}; do
+		source_checksum="$(sha256sum "${source_dir}/${source_file}" | cut -d' ' -f1)"
+
+		target_file="$(curl -s --fail "https://raw.githubusercontent.com/${org_repo}/master/${source_file}")"
+		if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
+			echo "LICENSE in ${org_repo} is not apache, skipping."
+			continue
+		fi
+		if [[ -z "${target_file}" ]]; then
+			echo "${source_file} doesn't exist in ${org_repo}"
+			if [[ "${source_file}" == 'CODE_OF_CONDUCT.md' ]] ; then
+				echo "CODE_OF_CONDUCT.md missing in ${org_repo}, force updating."
+				needs_update+=('CODE_OF_CONDUCT.md')
+			fi
+			continue
+		fi
+
+		target_checksum="$(echo "${target_file}" | sha256sum | cut -d' ' -f1)"
+		if [ "${source_checksum}" == "${target_checksum}" ]; then
+			echo "${source_file} is already in sync."
+			continue
+		fi
+		needs_update+=("${source_file}")
+	done
+
+	if [[ "${#needs_update[@]}" -eq 0 ]] ; then
+		echo "No files need sync."
		return
	fi
@ -71,8 +97,11 @@ process_repo() {
	cd "${tmp_dir}/${org_repo}" || return 1
	git checkout -b "${branch}" || return 1

-	# Replace Makefile.common in target repo by one from prometheus/prometheus
-	cp -f "${source_makefile}" ./
+	# Update the files in target repo by one from prometheus/prometheus.
+	for source_file in ${needs_update[@]}; do
+		cp -f "${source_dir}/${source_file}" "./${source_file}"
+	done

	if [ -n "$(git status --porcelain)" ]; then
		git config user.email "${git_mail}"
		git config user.name "${git_user}"


@ -17,7 +17,6 @@ import (
	"math/rand"
	"testing"

-	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/util/testutil"
)

@ -105,7 +104,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
		testutil.Equals(t, ev, v, "value mismatch")
	}

-	it = NewBufferIterator(NewListSeriesIterator([]tsdbutil.Sample{
+	it = NewBufferIterator(NewListSeriesIterator(samples{
		sample{t: 1, v: 2},
		sample{t: 2, v: 3},
		sample{t: 3, v: 4},

View file

@@ -14,11 +14,12 @@
package storage
import (
+"bytes"
"container/heap"
"context"
-"reflect"
"sort"
"strings"
+"sync"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
@@ -37,8 +38,15 @@ type fanout struct {
secondaries []Storage
}
-// NewFanout returns a new fan-out Storage, which proxies reads and writes
+// NewFanout returns a new fanout Storage, which proxies reads and writes
// through to multiple underlying storages.
+//
+// The difference between primary and secondary Storage is only for read (Querier) path and it goes as follows:
+// * If the primary querier returns an error, then any of the Querier operations will fail.
+// * If any secondary querier returns an error the result from that querier is discarded. The overall operation will succeed,
+// and the error from the secondary querier will be returned as a warning.
+//
+// NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort.
func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage {
return &fanout{
logger: logger,
@@ -56,8 +64,8 @@ func (f *fanout) StartTime() (int64, error) {
return int64(model.Latest), err
}
-for _, storage := range f.secondaries {
-t, err := storage.StartTime()
+for _, s := range f.secondaries {
+t, err := s.StartTime()
if err != nil {
return int64(model.Latest), err
}
@@ -69,29 +77,49 @@
}
}
func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
-queriers := make([]Querier, 0, 1+len(f.secondaries))
-// Add primary querier.
-primaryQuerier, err := f.primary.Querier(ctx, mint, maxt)
+primary, err := f.primary.Querier(ctx, mint, maxt)
if err != nil {
return nil, err
}
-queriers = append(queriers, primaryQuerier)
-// Add secondary queriers.
+secondaries := make([]Querier, 0, len(f.secondaries))
for _, storage := range f.secondaries {
querier, err := storage.Querier(ctx, mint, maxt)
if err != nil {
-for _, q := range queriers {
-// TODO(bwplotka): Log error.
-_ = q.Close()
+// Close already open Queriers, append potential errors to returned error.
+errs := tsdb_errors.MultiError{err}
+errs.Add(primary.Close())
+for _, q := range secondaries {
+errs.Add(q.Close())
}
-return nil, err
+return nil, errs.Err()
}
-queriers = append(queriers, querier)
+secondaries = append(secondaries, querier)
}
-return NewMergeQuerier(primaryQuerier, queriers, ChainedSeriesMerge), nil
+return NewMergeQuerier(primary, secondaries, ChainedSeriesMerge), nil
}
+func (f *fanout) ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error) {
+primary, err := f.primary.ChunkQuerier(ctx, mint, maxt)
+if err != nil {
+return nil, err
+}
+secondaries := make([]ChunkQuerier, 0, len(f.secondaries))
+for _, storage := range f.secondaries {
+querier, err := storage.ChunkQuerier(ctx, mint, maxt)
+if err != nil {
+// Close already open Queriers, append potential errors to returned error.
+errs := tsdb_errors.MultiError{err}
+errs.Add(primary.Close())
+for _, q := range secondaries {
+errs.Add(q.Close())
+}
+return nil, errs.Err()
+}
+secondaries = append(secondaries, querier)
+}
+return NewMergeChunkQuerier(primary, secondaries, NewCompactingChunkSeriesMerger(ChainedSeriesMerge)), nil
+}
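Aside: both Querier and ChunkQuerier above follow the same shape: if opening a secondary fails, everything opened so far is closed and all errors are returned together. A minimal self-contained sketch of that pattern, using the standard library's errors.Join (Go 1.20+) as a stand-in for tsdb_errors.MultiError:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// closeAllOnError closes every already-opened resource and joins the
// original cause with any Close errors, mirroring the pattern above.
func closeAllOnError(cause error, opened ...io.Closer) error {
	errs := []error{cause}
	for _, c := range opened {
		if err := c.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func main() {
	a := io.NopCloser(strings.NewReader("a")) // Close always returns nil
	err := closeAllOnError(errors.New("opening secondary failed"), a)
	fmt.Println(err)
}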
func (f *fanout) Appender() Appender {
@@ -109,18 +137,12 @@ func (f *fanout) Appender() Appender {
// Close closes the storage and all its underlying resources.
func (f *fanout) Close() error {
-if err := f.primary.Close(); err != nil {
-return err
-}
-// TODO return multiple errors?
-var lastErr error
-for _, storage := range f.secondaries {
-if err := storage.Close(); err != nil {
-lastErr = err
-}
-}
-return lastErr
+errs := tsdb_errors.MultiError{}
+errs.Add(f.primary.Close())
+for _, s := range f.secondaries {
+errs.Add(s.Close())
+}
+return errs.Err()
}
// fanoutAppender implements Appender.
@@ -188,153 +210,138 @@ func (f *fanoutAppender) Rollback() (err error) {
}
type mergeGenericQuerier struct {
-mergeFunc genericSeriesMergeFunc
-primaryQuerier genericQuerier
queriers []genericQuerier
-failedQueriers map[genericQuerier]struct{}
-setQuerierMap map[genericSeriesSet]genericQuerier
+// mergeFn is used when we see series from different queriers Selects with the same labels.
+mergeFn genericSeriesMergeFunc
}
-// NewMergeQuerier returns a new Querier that merges results of many queriers.
-// NewMergeQuerier will return NoopQuerier if no queriers are passed to it
-// and will filter NoopQueriers from its arguments, in order to reduce overhead
-// when only one querier is passed.
-// The difference between primary and secondary is as follows: if the primaryQuerier returns an error, query fails.
-// For secondaries it just returns warnings.
-func NewMergeQuerier(primaryQuerier Querier, queriers []Querier, mergeFunc VerticalSeriesMergeFunc) Querier {
-filtered := make([]genericQuerier, 0, len(queriers))
-for _, querier := range queriers {
+// NewMergeQuerier returns a new Querier that merges results of given primary and slice of secondary queriers.
+// See NewFanout commentary to learn more about primary vs secondary differences.
+//
+// In case of overlaps between the data given by primary + secondaries Selects, merge function will be used.
+func NewMergeQuerier(primary Querier, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
+queriers := make([]genericQuerier, 0, len(secondaries)+1)
+if primary != nil {
+queriers = append(queriers, newGenericQuerierFrom(primary))
+}
+for _, querier := range secondaries {
if _, ok := querier.(noopQuerier); !ok && querier != nil {
-filtered = append(filtered, newGenericQuerierFrom(querier))
+queriers = append(queriers, newSecondaryQuerierFrom(querier))
}
}
-if len(filtered) == 0 {
-return primaryQuerier
-}
-if primaryQuerier == nil && len(filtered) == 1 {
-return &querierAdapter{filtered[0]}
-}
return &querierAdapter{&mergeGenericQuerier{
-mergeFunc: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge,
-primaryQuerier: newGenericQuerierFrom(primaryQuerier),
-queriers: filtered,
-failedQueriers: make(map[genericQuerier]struct{}),
-setQuerierMap: make(map[genericSeriesSet]genericQuerier),
+mergeFn: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge,
+queriers: queriers,
}}
}
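To make the primary-versus-secondary contract concrete, here is a small self-contained Go model of the read semantics described above (all names are illustrative, not part of the storage package): a primary error fails the whole operation, while secondary errors are demoted to warnings and their partial data is dropped.

package main

import (
	"errors"
	"fmt"
)

type result struct {
	data []string
	err  error
}

// mergeResults models the primary/secondary semantics: a primary error
// fails everything; secondary errors become warnings and their results
// are discarded.
func mergeResults(primary result, secondaries []result) (data []string, warnings []error, err error) {
	if primary.err != nil {
		return nil, nil, primary.err
	}
	data = append(data, primary.data...)
	for _, s := range secondaries {
		if s.err != nil {
			warnings = append(warnings, s.err)
			continue
		}
		data = append(data, s.data...)
	}
	return data, warnings, nil
}

func main() {
	data, warns, err := mergeResults(
		result{data: []string{"local"}},
		[]result{{err: errors.New("remote down")}, {data: []string{"remote2"}}},
	)
	fmt.Println(data, warns, err) // [local remote2] [remote down] <nil>
}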
-// NewMergeChunkQuerier returns a new ChunkQuerier that merges results of many chunk queriers.
-// NewMergeChunkQuerier will return NoopChunkQuerier if no chunk queriers are passed to it,
-// and will filter NoopChunkQueriers from its arguments, in order to reduce overhead
-// when only one chunk querier is passed.
-func NewMergeChunkQuerier(primaryQuerier ChunkQuerier, queriers []ChunkQuerier, merger VerticalChunkSeriesMergerFunc) ChunkQuerier {
-filtered := make([]genericQuerier, 0, len(queriers))
-for _, querier := range queriers {
+// NewMergeChunkQuerier returns a new ChunkQuerier that merges results of given primary and slice of secondary chunk queriers.
+// See NewFanout commentary to learn more about primary vs secondary differences.
+//
+// In case of overlaps between the data given by primary + secondaries Selects, merge function will be used.
+// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
+func NewMergeChunkQuerier(primary ChunkQuerier, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
+queriers := make([]genericQuerier, 0, len(secondaries)+1)
+if primary != nil {
+queriers = append(queriers, newGenericQuerierFromChunk(primary))
+}
+for _, querier := range secondaries {
if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
-filtered = append(filtered, newGenericQuerierFromChunk(querier))
+queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
}
}
-if len(filtered) == 0 {
-return primaryQuerier
-}
-if primaryQuerier == nil && len(filtered) == 1 {
-return &chunkQuerierAdapter{filtered[0]}
-}
return &chunkQuerierAdapter{&mergeGenericQuerier{
-mergeFunc: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergerFunc: merger}).Merge,
-primaryQuerier: newGenericQuerierFromChunk(primaryQuerier),
-queriers: filtered,
-failedQueriers: make(map[genericQuerier]struct{}),
-setQuerierMap: make(map[genericSeriesSet]genericQuerier),
+mergeFn: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge,
+queriers: queriers,
}}
}
// Select returns a set of series that matches the given label matchers.
-func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (genericSeriesSet, Warnings, error) {
+func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
if len(q.queriers) == 1 {
return q.queriers[0].Select(sortSeries, hints, matchers...)
}
var (
seriesSets = make([]genericSeriesSet, 0, len(q.queriers))
-warnings Warnings
-priErr error
+wg sync.WaitGroup
+seriesSetChan = make(chan genericSeriesSet)
)
-type queryResult struct {
-qr genericQuerier
-set genericSeriesSet
-wrn Warnings
-selectError error
-}
-queryResultChan := make(chan *queryResult)
+// Schedule all Selects for all queriers we know about.
for _, querier := range q.queriers {
+wg.Add(1)
go func(qr genericQuerier) {
+defer wg.Done()
// We need to sort for NewMergeSeriesSet to work.
-set, wrn, err := qr.Select(true, hints, matchers...)
-queryResultChan <- &queryResult{qr: qr, set: set, wrn: wrn, selectError: err}
+seriesSetChan <- qr.Select(true, hints, matchers...)
}(querier)
}
-for i := 0; i < len(q.queriers); i++ {
-qryResult := <-queryResultChan
-q.setQuerierMap[qryResult.set] = qryResult.qr
-if qryResult.wrn != nil {
-warnings = append(warnings, qryResult.wrn...)
-}
-if qryResult.selectError != nil {
-q.failedQueriers[qryResult.qr] = struct{}{}
-// If the error source isn't the primary querier, return the error as a warning and continue.
-if !reflect.DeepEqual(qryResult.qr, q.primaryQuerier) {
-warnings = append(warnings, qryResult.selectError)
-} else {
-priErr = qryResult.selectError
-}
-continue
-}
-seriesSets = append(seriesSets, qryResult.set)
-}
-if priErr != nil {
-return nil, nil, priErr
-}
-return newGenericMergeSeriesSet(seriesSets, q, q.mergeFunc), warnings, nil
+go func() {
+wg.Wait()
+close(seriesSetChan)
+}()
+for r := range seriesSetChan {
+seriesSets = append(seriesSets, r)
+}
+return &lazySeriesSet{create: create(seriesSets, q.mergeFn)}
}
+func create(seriesSets []genericSeriesSet, mergeFunc genericSeriesMergeFunc) func() (genericSeriesSet, bool) {
+// Returned function gets called with the first call to Next().
+return func() (genericSeriesSet, bool) {
+if len(seriesSets) == 1 {
+return seriesSets[0], seriesSets[0].Next()
+}
+var h genericSeriesSetHeap
+for _, set := range seriesSets {
+if set == nil {
+continue
+}
+if set.Next() {
+heap.Push(&h, set)
+continue
+}
+// When primary fails ignore results from secondaries.
+// Only the primary querier returns error.
+if err := set.Err(); err != nil {
+return errorOnlySeriesSet{err}, false
+}
+}
+set := &genericMergeSeriesSet{
+mergeFunc: mergeFunc,
+sets: seriesSets,
+heap: h,
+}
+return set, set.Next()
+}
}
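The Select above fans out to all queriers concurrently and gathers the resulting sets on a channel that a helper goroutine closes once the WaitGroup drains. A self-contained sketch of that collector shape (generic names, not the package's API):

package main

import (
	"fmt"
	"sync"
)

// fanOut runs fn over every input concurrently and collects results on a
// channel that is closed once all goroutines finish — the same WaitGroup
// plus collector-goroutine shape used by Select above.
func fanOut(inputs []int, fn func(int) int) []int {
	var wg sync.WaitGroup
	out := make(chan int)
	for _, in := range inputs {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			out <- fn(v)
		}(in)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	var results []int
	for r := range out {
		results = append(results, r)
	}
	return results
}

func main() {
	fmt.Println(fanOut([]int{1, 2, 3}, func(v int) int { return v * v }))
}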
// LabelValues returns all potential values for a label name.
func (q *mergeGenericQuerier) LabelValues(name string) ([]string, Warnings, error) {
-var results [][]string
-var warnings Warnings
+var (
+results [][]string
+warnings Warnings
+)
for _, querier := range q.queriers {
values, wrn, err := querier.LabelValues(name)
if wrn != nil {
+// TODO(bwplotka): We could potentially wrap warnings.
warnings = append(warnings, wrn...)
}
if err != nil {
-q.failedQueriers[querier] = struct{}{}
-// If the error source isn't the primary querier, return the error as a warning and continue.
-if querier != q.primaryQuerier {
-warnings = append(warnings, err)
-continue
-} else {
-return nil, nil, err
-}
+return nil, nil, errors.Wrapf(err, "LabelValues() from Querier for label %s", name)
}
results = append(results, values)
}
return mergeStringSlices(results), warnings, nil
}
-func (q *mergeGenericQuerier) IsFailedSet(set genericSeriesSet) bool {
-_, isFailedQuerier := q.failedQueriers[q.setQuerierMap[set]]
-return isFailedQuerier
-}
func mergeStringSlices(ss [][]string) []string {
switch len(ss) {
case 0:
@@ -376,42 +383,38 @@ func mergeTwoStringSlices(a, b []string) []string {
// LabelNames returns all the unique label names present in the block in sorted order.
func (q *mergeGenericQuerier) LabelNames() ([]string, Warnings, error) {
-labelNamesMap := make(map[string]struct{})
-var warnings Warnings
+var (
+labelNamesMap = make(map[string]struct{})
+warnings Warnings
+)
for _, querier := range q.queriers {
names, wrn, err := querier.LabelNames()
if wrn != nil {
+// TODO(bwplotka): We could potentially wrap warnings.
warnings = append(warnings, wrn...)
}
if err != nil {
-q.failedQueriers[querier] = struct{}{}
-// If the error source isn't the primaryQuerier querier, return the error as a warning and continue.
-if querier != q.primaryQuerier {
-warnings = append(warnings, err)
-continue
-} else {
return nil, nil, errors.Wrap(err, "LabelNames() from Querier")
}
-}
for _, name := range names {
labelNamesMap[name] = struct{}{}
}
}
+if len(labelNamesMap) == 0 {
+return nil, warnings, nil
+}
labelNames := make([]string, 0, len(labelNamesMap))
for name := range labelNamesMap {
labelNames = append(labelNames, name)
}
sort.Strings(labelNames)
return labelNames, warnings, nil
}
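mergeStringSlices and mergeTwoStringSlices (referenced in the hunk header above) boil down to a classic two-pointer merge of sorted, deduplicated slices. A self-contained sketch of the core step:

package main

import "fmt"

// mergeTwoSorted merges two sorted, deduplicated string slices into one
// sorted, deduplicated slice — the core of mergeStringSlices above.
func mergeTwoSorted(a, b []string) []string {
	res := make([]string, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			res = append(res, a[i])
			i++
		case a[i] > b[j]:
			res = append(res, b[j])
			j++
		default: // same value in both inputs: keep one copy
			res = append(res, a[i])
			i++
			j++
		}
	}
	res = append(res, a[i:]...)
	return append(res, b[j:]...)
}

func main() {
	fmt.Println(mergeTwoSorted([]string{"a", "c"}, []string{"b", "c", "d"}))
	// [a b c d]
}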
// Close releases the resources of the Querier.
func (q *mergeGenericQuerier) Close() error {
-var errs tsdb_errors.MultiError
+errs := tsdb_errors.MultiError{}
for _, querier := range q.queriers {
if err := querier.Close(); err != nil {
errs.Add(err)
@@ -420,58 +423,57 @@ func (q *mergeGenericQuerier) Close() error {
return errs.Err()
}
+// VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together.
+// It has to handle time-overlapped series as well.
+type VerticalSeriesMergeFunc func(...Series) Series
+// NewMergeSeriesSet returns a new SeriesSet that merges many SeriesSets together.
+func NewMergeSeriesSet(sets []SeriesSet, mergeFunc VerticalSeriesMergeFunc) SeriesSet {
+genericSets := make([]genericSeriesSet, 0, len(sets))
+for _, s := range sets {
+genericSets = append(genericSets, &genericSeriesSetAdapter{s})
+}
+return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFunc}).Merge)}
+}
+// VerticalChunkSeriesMergeFunc returns merged chunk series implementation that merges potentially time-overlapping
+// chunk series with the same labels into single ChunkSeries.
+//
+// NOTE: It's up to implementation how series are vertically merged (if chunks are sorted, re-encoded etc).
+type VerticalChunkSeriesMergeFunc func(...ChunkSeries) ChunkSeries
+// NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges many SeriesSet together.
+func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, mergeFunc VerticalChunkSeriesMergeFunc) ChunkSeriesSet {
+genericSets := make([]genericSeriesSet, 0, len(sets))
+for _, s := range sets {
+genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s})
+}
+return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFunc}).Merge)}
+}
-// genericMergeSeriesSet implements genericSeriesSet
+// genericMergeSeriesSet implements genericSeriesSet.
type genericMergeSeriesSet struct {
currentLabels labels.Labels
mergeFunc genericSeriesMergeFunc
heap genericSeriesSetHeap
sets []genericSeriesSet
currentSets []genericSeriesSet
-querier *mergeGenericQuerier
}
-// VerticalSeriesMergeFunc returns merged series implementation that merges series with same labels together.
-// It has to handle time-overlapped series as well.
-type VerticalSeriesMergeFunc func(...Series) Series
-// VerticalChunkSeriesMergerFunc returns merged chunk series implementation that merges series with same labels together.
-// It has to handle time-overlapped chunk series as well.
-type VerticalChunkSeriesMergerFunc func(...ChunkSeries) ChunkSeries
-// NewMergeSeriesSet returns a new SeriesSet that merges results of many SeriesSets.
-func NewMergeSeriesSet(sets []SeriesSet, merger VerticalSeriesMergeFunc) SeriesSet {
-genericSets := make([]genericSeriesSet, 0, len(sets))
-for _, s := range sets {
-genericSets = append(genericSets, &genericSeriesSetAdapter{s})
-}
-return &seriesSetAdapter{newGenericMergeSeriesSet(genericSets, nil, (&seriesMergerAdapter{VerticalSeriesMergeFunc: merger}).Merge)}
-}
-// NewMergeChunkSeriesSet returns a new ChunkSeriesSet that merges results of many ChunkSeriesSets.
-func NewMergeChunkSeriesSet(sets []ChunkSeriesSet, merger VerticalChunkSeriesMergerFunc) ChunkSeriesSet {
-genericSets := make([]genericSeriesSet, 0, len(sets))
-for _, s := range sets {
-genericSets = append(genericSets, &genericChunkSeriesSetAdapter{s})
-}
-return &chunkSeriesSetAdapter{newGenericMergeSeriesSet(genericSets, nil, (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergerFunc: merger}).Merge)}
}
// newGenericMergeSeriesSet returns a new genericSeriesSet that merges (and deduplicates)
-// series returned by the many series sets when iterating.
-// Each of those series sets must return its series in labels order, otherwise
+// series returned by the series sets when iterating.
+// Each series set must return its series in labels order, otherwise
// merged series set will be incorrect.
-// Argument 'querier' is optional and can be nil. Pass Querier if you want to retry query in case of failing series set.
// Overlapped situations are merged using provided mergeFunc.
-func newGenericMergeSeriesSet(sets []genericSeriesSet, querier *mergeGenericQuerier, mergeFunc genericSeriesMergeFunc) genericSeriesSet {
+func newGenericMergeSeriesSet(sets []genericSeriesSet, mergeFunc genericSeriesMergeFunc) genericSeriesSet {
if len(sets) == 1 {
return sets[0]
}
-// Sets need to be pre-advanced, so we can introspect the label of the
+// We are pre-advancing sets, so we can introspect the label of the
// series under the cursor.
var h genericSeriesSetHeap
for _, set := range sets {
@@ -484,15 +486,13 @@ func newGenericMergeSeriesSet(sets []genericSeriesSet, querier *mergeGenericQuer
}
return &genericMergeSeriesSet{
mergeFunc: mergeFunc,
-heap: h,
sets: sets,
-querier: querier,
+heap: h,
}
}
func (c *genericMergeSeriesSet) Next() bool {
// Run in a loop because the "next" series sets may not be valid anymore.
-// If a remote querier fails, we discard all series sets from that querier.
// If, for the current label set, all the next series sets come from
// failed remote storage sources, we want to keep trying with the next label set.
for {
@@ -503,6 +503,7 @@ func (c *genericMergeSeriesSet) Next() bool {
heap.Push(&c.heap, set)
}
}
if len(c.heap) == 0 {
return false
}
@@ -512,9 +513,6 @@ func (c *genericMergeSeriesSet) Next() bool {
c.currentLabels = c.heap[0].At().Labels()
for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
set := heap.Pop(&c.heap).(genericSeriesSet)
-if c.querier != nil && c.querier.IsFailedSet(set) {
-continue
-}
c.currentSets = append(c.currentSets, set)
}
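Next() above is a k-way merge: a heap is ordered by the labels under each set's cursor, and every set whose current labels equal the top's is popped before merging. A self-contained miniature of that heap discipline (plain string slices standing in for series sets; duplicates are simply emitted here, where the real code hands them to mergeFunc):

package main

import (
	"container/heap"
	"fmt"
)

// cursor is a position inside one sorted label list.
type cursor struct {
	labels []string
	pos    int
}

// setHeap orders cursors by the label currently under the cursor,
// mirroring genericSeriesSetHeap above.
type setHeap []*cursor

func (h setHeap) Len() int            { return len(h) }
func (h setHeap) Less(i, j int) bool  { return h[i].labels[h[i].pos] < h[j].labels[h[j].pos] }
func (h setHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *setHeap) Push(x interface{}) { *h = append(*h, x.(*cursor)) }
func (h *setHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := &setHeap{}
	for _, ls := range [][]string{{"a", "d"}, {"b", "c"}, {"a", "c"}} {
		heap.Push(h, &cursor{labels: ls})
	}
	for h.Len() > 0 {
		c := heap.Pop(h).(*cursor)
		fmt.Println(c.labels[c.pos]) // emits labels in sorted order
		if c.pos++; c.pos < len(c.labels) {
			heap.Push(h, c)
		}
	}
}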
@@ -547,6 +545,14 @@ func (c *genericMergeSeriesSet) Err() error {
return nil
}
+func (c *genericMergeSeriesSet) Warnings() Warnings {
+var ws Warnings
+for _, set := range c.sets {
+ws = append(ws, set.Warnings()...)
+}
+return ws
+}
type genericSeriesSetHeap []genericSeriesSet
func (h genericSeriesSetHeap) Len() int { return len(h) }
@@ -569,10 +575,16 @@ func (h *genericSeriesSetHeap) Pop() interface{} {
return x
}
-// ChainedSeriesMerge returns single series from many same series by chaining samples together.
-// In case of the timestamp overlap, the first overlapped sample is kept and the rest samples with the same timestamps
-// are dropped. We expect the same labels for each given series.
-// TODO(bwplotka): This has the same logic as tsdb.verticalChainedSeries. Remove this in favor of ChainedSeriesMerge in next PRs.
+// ChainedSeriesMerge returns single series from many same, potentially overlapping series by chaining samples together.
+// If one or more samples overlap, one sample from random overlapped ones is kept and all others with the same
+// timestamp are dropped.
+//
+// This works the best with replicated series, where data from two series are exactly the same. This does not work well
+// with "almost" the same data, e.g. from 2 Prometheus HA replicas. This is fine, since from the Prometheus perspective
+// this never happens.
+//
+// NOTE: Use this only when you see potentially overlapping series, as this introduces small overhead to handle overlaps
+// between series.
func ChainedSeriesMerge(s ...Series) Series {
if len(s) == 0 {
return nil
@@ -600,8 +612,9 @@ func (m *chainSeries) Iterator() chunkenc.Iterator {
return newChainSampleIterator(iterators)
}
-// chainSampleIterator is responsible to iterate over samples from different iterators of the same time series.
-// If one or more samples overlap, the first one is kept and all others with the same timestamp are dropped.
+// chainSampleIterator is responsible to iterate over samples from different iterators of the same time series in timestamps
+// order. If one or more samples overlap, one sample from random overlapped ones is kept and all others with the same
+// timestamp are dropped.
type chainSampleIterator struct {
iterators []chunkenc.Iterator
h samplesIteratorHeap
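The chain iterator's dedup rule, reduced to two pre-sorted slices in a self-contained sketch (the real implementation uses a heap over arbitrarily many iterators):

package main

import "fmt"

type sample struct {
	t int64
	v float64
}

// chain merges pre-sorted sample streams; on equal timestamps only one
// sample is kept, matching chainSampleIterator's dedup behaviour above.
func chain(a, b []sample) []sample {
	var out []sample
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].t < b[j].t:
			out = append(out, a[i])
			i++
		case a[i].t > b[j].t:
			out = append(out, b[j])
			j++
		default: // overlap: keep one, drop the other
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}

func main() {
	fmt.Println(chain(
		[]sample{{0, 0}, {2, 2}},
		[]sample{{1, 1}, {2, 22}, {3, 3}},
	))
}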
@@ -665,12 +678,13 @@ func (c *chainSampleIterator) Next() bool {
}
func (c *chainSampleIterator) Err() error {
+var errs tsdb_errors.MultiError
for _, iter := range c.iterators {
if err := iter.Err(); err != nil {
-return err
+errs.Add(err)
}
}
-return nil
+return errs.Err()
}
type samplesIteratorHeap []chunkenc.Iterator
@@ -696,77 +710,76 @@ func (h *samplesIteratorHeap) Pop() interface{} {
return x
}
-// VerticalChunkMergeFunc represents a function that merges multiple time overlapping chunks.
-// Passed chunks:
-// * have to be sorted by MinTime.
-// * have to be part of exactly the same timeseries.
-// * have to be populated.
-type VerticalChunksMergeFunc func(chks ...chunks.Meta) chunks.Iterator
-type verticalChunkSeriesMerger struct {
-verticalChunksMerger VerticalChunksMergeFunc
+type compactChunkSeriesMerger struct {
+mergeFunc VerticalSeriesMergeFunc
labels labels.Labels
series []ChunkSeries
}
-// NewVerticalChunkSeriesMerger returns VerticalChunkSeriesMerger that merges the same chunk series into one or more chunks.
-// In case of the chunk overlap, given VerticalChunkMergeFunc will be used.
+// NewCompactingChunkSeriesMerger returns VerticalChunkSeriesMergeFunc that merges the same chunk series into single chunk series.
+// In case of the chunk overlaps, it compacts those into one or more time-ordered non-overlapping chunks with merged data.
+// Samples from overlapped chunks are merged using series vertical merge func.
// It expects the same labels for each given series.
-func NewVerticalChunkSeriesMerger(chunkMerger VerticalChunksMergeFunc) VerticalChunkSeriesMergerFunc {
+//
+// NOTE: Use this only when you see potentially overlapping series, as this introduces small overhead to handle overlaps
+// between series.
+func NewCompactingChunkSeriesMerger(mergeFunc VerticalSeriesMergeFunc) VerticalChunkSeriesMergeFunc {
return func(s ...ChunkSeries) ChunkSeries {
if len(s) == 0 {
return nil
}
-return &verticalChunkSeriesMerger{
-verticalChunksMerger: chunkMerger,
+return &compactChunkSeriesMerger{
+mergeFunc: mergeFunc,
labels: s[0].Labels(),
series: s,
}
}
}
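What "compacting" means here, modeled standalone: decode all overlapping chunks to samples, merge them in timestamp order with duplicates dropped, and re-encode the result as one chunk. In this sketch a chunk is just a sorted sample slice (illustrative only; the real code round-trips through chunkenc encodings):

package main

import (
	"fmt"
	"sort"
)

type sample struct {
	t int64
	v float64
}

// compact merges overlapping chunks into one time-ordered chunk,
// keeping a single sample per timestamp.
func compact(chunks ...[]sample) []sample {
	var all []sample
	for _, chk := range chunks {
		all = append(all, chk...)
	}
	sort.SliceStable(all, func(i, j int) bool { return all[i].t < all[j].t })
	out := all[:0]
	for _, s := range all {
		if len(out) > 0 && out[len(out)-1].t == s.t {
			continue // overlapping timestamp: keep the first sample
		}
		out = append(out, s)
	}
	return out
}

func main() {
	fmt.Println(compact(
		[]sample{{0, 0}, {2, 2}},
		[]sample{{1, 1}, {2, 22}},
	))
}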
-func (s *verticalChunkSeriesMerger) Labels() labels.Labels {
+func (s *compactChunkSeriesMerger) Labels() labels.Labels {
return s.labels
}
-func (s *verticalChunkSeriesMerger) Iterator() chunks.Iterator {
+func (s *compactChunkSeriesMerger) Iterator() chunks.Iterator {
iterators := make([]chunks.Iterator, 0, len(s.series))
for _, series := range s.series {
iterators = append(iterators, series.Iterator())
}
-return &chainChunkIterator{
-overlappedChunksMerger: s.verticalChunksMerger,
+return &compactChunkIterator{
+mergeFunc: s.mergeFunc,
+labels: s.labels,
iterators: iterators,
+h: nil,
}
}
-// chainChunkIterator is responsible to chain chunks from different iterators of same time series.
-// If they are time overlapping overlappedChunksMerger will be used.
-type chainChunkIterator struct {
-overlappedChunksMerger VerticalChunksMergeFunc
+// compactChunkIterator is responsible to compact chunks from different iterators of the same time series into single chainSeries.
+// If time-overlapping chunks are found, they are encoded and passed to series merge and encoded again into one bigger chunk.
+// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
+type compactChunkIterator struct {
+mergeFunc VerticalSeriesMergeFunc
+labels labels.Labels
iterators []chunks.Iterator
h chunkIteratorHeap
}
-func (c *chainChunkIterator) At() chunks.Meta {
+func (c *compactChunkIterator) At() chunks.Meta {
if len(c.h) == 0 {
-panic("chainChunkIterator.At() called after .Next() returned false.")
+panic("compactChunkIterator.At() called after .Next() returned false.")
}
return c.h[0].At()
}
-func (c *chainChunkIterator) Next() bool {
+func (c *compactChunkIterator) Next() bool {
if c.h == nil {
for _, iter := range c.iterators {
if iter.Next() {
heap.Push(&c.h, iter)
}
}
return len(c.h) > 0
}
@@ -774,41 +787,63 @@ func (c *chainChunkIterator) Next() bool {
return false
}
-// Detect the shortest chain of time-overlapped chunks.
+// Detect overlaps to compact.
+// Be smart about it and deduplicate on the fly if chunks are identical.
last := c.At()
-var overlapped []chunks.Meta
+var overlapped []Series
for {
iter := heap.Pop(&c.h).(chunks.Iterator)
if iter.Next() {
heap.Push(&c.h, iter)
}
if len(c.h) == 0 {
break
}
+// Get the current oldest chunk by min, then max time.
next := c.At()
if next.MinTime > last.MaxTime {
// No overlap with last one.
break
}
-overlapped = append(overlapped, last)
+if next.MinTime == last.MinTime &&
+next.MaxTime == last.MaxTime &&
+bytes.Equal(next.Chunk.Bytes(), last.Chunk.Bytes()) {
+// 1:1 duplicates, skip last.
+continue
+}
+overlapped = append(overlapped, &chunkToSeriesDecoder{
+labels: c.labels,
+Meta: last,
+})
last = next
}
-if len(overlapped) > 0 {
-heap.Push(&c.h, c.overlappedChunksMerger(append(overlapped, c.At())...))
-}
-return len(c.h) > 0
+if len(overlapped) == 0 {
+return len(c.h) > 0
+}
+// Add last, not yet included overlap.
+overlapped = append(overlapped, &chunkToSeriesDecoder{
+labels: c.labels,
+Meta: c.At(),
+})
+var chkSeries ChunkSeries = &seriesToChunkEncoder{Series: c.mergeFunc(overlapped...)}
+heap.Push(&c.h, chkSeries)
+return true
}
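The overlap and duplicate checks used by Next() above, isolated into a self-contained sketch (meta is an illustrative stand-in for chunks.Meta; chunks are assumed ordered by min time, then max time):

package main

import (
	"bytes"
	"fmt"
)

type meta struct {
	minT, maxT int64
	data       []byte
}

// overlaps reports whether next overlaps last, the predicate Next()
// above uses to decide whether compaction is needed.
func overlaps(last, next meta) bool {
	return next.minT <= last.maxT
}

// duplicate reports a 1:1 duplicate chunk that can be skipped outright.
func duplicate(last, next meta) bool {
	return next.minT == last.minT && next.maxT == last.maxT &&
		bytes.Equal(next.data, last.data)
}

func main() {
	a := meta{0, 10, []byte{1}}
	b := meta{5, 15, []byte{2}}
	fmt.Println(overlaps(a, b), duplicate(a, a)) // true true
}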
-func (c *chainChunkIterator) Err() error {
+func (c *compactChunkIterator) Err() error {
+var errs tsdb_errors.MultiError
for _, iter := range c.iterators {
if err := iter.Err(); err != nil {
-return err
+errs.Add(err)
}
}
-return nil
+return errs.Err()
}
type chunkIteratorHeap []chunks.Iterator

View file

@@ -15,15 +15,15 @@ package storage
import (
"context"
-"errors"
+"testing"
+"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
-"testing"
)
func TestSelectSorted(t *testing.T) {
@@ -72,6 +72,7 @@ func TestSelectSorted(t *testing.T) {
fanoutStorage := storage.NewFanout(nil, priStorage, remoteStorage1, remoteStorage2)
+t.Run("querier", func(t *testing.T) {
querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
testutil.Ok(t, err)
defer querier.Close()
@@ -79,8 +80,7 @@ func TestSelectSorted(t *testing.T) {
matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
testutil.Ok(t, err)
-seriesSet, _, err := querier.Select(true, nil, matcher)
-testutil.Ok(t, err)
+seriesSet := querier.Select(true, nil, matcher)
result := make(map[int64]float64)
var labelsResult labels.Labels
@@ -97,6 +97,35 @@ func TestSelectSorted(t *testing.T) {
testutil.Equals(t, labelsResult, outputLabel)
testutil.Equals(t, inputTotalSize, len(result))
+})
+t.Run("chunk querier", func(t *testing.T) {
+t.Skip("TODO(bwplotka: Unskip when db will implement ChunkQuerier.")
+querier, err := fanoutStorage.ChunkQuerier(context.Background(), 0, 8000)
+testutil.Ok(t, err)
+defer querier.Close()
+matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
+testutil.Ok(t, err)
+seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(true, nil, matcher))
+result := make(map[int64]float64)
+var labelsResult labels.Labels
+for seriesSet.Next() {
+series := seriesSet.At()
+seriesLabels := series.Labels()
+labelsResult = seriesLabels
+iterator := series.Iterator()
+for iterator.Next() {
+timestamp, value := iterator.At()
+result[timestamp] = value
+}
+}
+testutil.Ok(t, seriesSet.Err())
+testutil.Equals(t, labelsResult, outputLabel)
+testutil.Equals(t, inputTotalSize, len(result))
+})
}
func TestFanoutErrors(t *testing.T) {
@@ -106,19 +135,19 @@ func TestFanoutErrors(t *testing.T) {
cases := []struct {
primary storage.Storage
secondary storage.Storage
-warnings storage.Warnings
+warning error
err error
}{
{
primary: workingStorage,
secondary: errStorage{},
-warnings: storage.Warnings{errSelect},
+warning: errSelect,
err: nil,
},
{
primary: errStorage{},
secondary: workingStorage,
-warnings: nil,
+warning: nil,
err: errSelect,
},
}
@@ -126,24 +155,55 @@
for _, tc := range cases {
fanoutStorage := storage.NewFanout(nil, tc.primary, tc.secondary)
+t.Run("samples", func(t *testing.T) {
querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
testutil.Ok(t, err)
defer querier.Close()
matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
-ss, warnings, err := querier.Select(true, nil, matcher)
-testutil.Equals(t, tc.err, err)
-testutil.Equals(t, tc.warnings, warnings)
-// Only test series iteration if there are no errors.
-if err != nil {
-continue
-}
+ss := querier.Select(true, nil, matcher)
+// Exhaust.
for ss.Next() {
ss.At()
}
-testutil.Ok(t, ss.Err())
+if tc.err != nil {
+testutil.NotOk(t, ss.Err())
+testutil.Equals(t, tc.err.Error(), ss.Err().Error())
+}
+if tc.warning != nil {
+testutil.Assert(t, len(ss.Warnings()) > 0, "warnings expected")
+testutil.NotOk(t, ss.Warnings()[0])
+testutil.Equals(t, tc.warning.Error(), ss.Warnings()[0].Error())
+}
+})
+t.Run("chunks", func(t *testing.T) {
+t.Skip("enable once TestStorage and TSDB implements ChunkQuerier")
+querier, err := fanoutStorage.ChunkQuerier(context.Background(), 0, 8000)
+testutil.Ok(t, err)
+defer querier.Close()
+matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
+ss := querier.Select(true, nil, matcher)
+// Exhaust.
+for ss.Next() {
+ss.At()
+}
+if tc.err != nil {
+testutil.NotOk(t, ss.Err())
+testutil.Equals(t, tc.err.Error(), ss.Err().Error())
+}
+if tc.warning != nil {
+testutil.Assert(t, len(ss.Warnings()) > 0, "warnings expected")
+testutil.NotOk(t, ss.Warnings()[0])
+testutil.Equals(t, tc.warning.Error(), ss.Warnings()[0].Error())
+}
+})
}
}
@@ -151,26 +211,23 @@ var errSelect = errors.New("select error")
type errStorage struct{}
+type errQuerier struct{}
func (errStorage) Querier(_ context.Context, _, _ int64) (storage.Querier, error) {
return errQuerier{}, nil
}
+type errChunkQuerier struct{ errQuerier }
+func (errStorage) ChunkQuerier(_ context.Context, _, _ int64) (storage.ChunkQuerier, error) {
+return errChunkQuerier{}, nil
+}
-func (errStorage) Appender() storage.Appender {
-return nil
-}
-func (errStorage) StartTime() (int64, error) {
-return 0, nil
-}
-func (errStorage) Close() error {
-return nil
-}
-type errQuerier struct{}
+func (errStorage) Appender() storage.Appender { return nil }
+func (errStorage) StartTime() (int64, error) { return 0, nil }
+func (errStorage) Close() error { return nil }
-func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-return nil, nil, errSelect
-}
+func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
+return storage.ErrSeriesSet(errSelect)
+}
func (errQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
@@ -181,6 +238,8 @@ func (errQuerier) LabelNames() ([]string, storage.Warnings, error) {
return nil, nil, errors.New("label names error")
}
-func (errQuerier) Close() error {
-return nil
-}
+func (errQuerier) Close() error { return nil }
+func (errChunkQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.ChunkSeriesSet {
+return storage.ErrChunkSeriesSet(errSelect)
+}
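The tests above rely on the new convention that Select never returns an error directly; failures surface through the returned set's Err() (and warnings through Warnings()). A tiny self-contained model of such an error-only set (modeled on, but not identical to, storage.ErrSeriesSet):

package main

import (
	"errors"
	"fmt"
)

// errSet models an error-only series set: iteration yields nothing and
// the failure surfaces via Err(), which is what the tests above assert.
type errSet struct{ err error }

func (e errSet) Next() bool { return false }
func (e errSet) Err() error { return e.err }

func main() {
	ss := errSet{err: errors.New("select error")}
	for ss.Next() {
		// never reached
	}
	fmt.Println(ss.Err())
}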

View file

@@ -17,8 +17,11 @@ import (
"fmt"
"math"
"sort"
+"sync"
"testing"
+"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
@@ -57,23 +60,36 @@ func TestMergeTwoStringSlices(t *testing.T) {
func TestMergeQuerierWithChainMerger(t *testing.T) {
for _, tc := range []struct {
name string
+primaryQuerierSeries []Series
querierSeries [][]Series
extraQueriers []Querier
expected SeriesSet
}{
{
-name: "1 querier with no series",
+name: "one primary querier with no series",
+primaryQuerierSeries: []Series{},
+expected: NewMockSeriesSet(),
+},
+{
+name: "one secondary querier with no series",
querierSeries: [][]Series{{}},
expected: NewMockSeriesSet(),
},
{
-name: "many queriers with no series",
+name: "many secondary queriers with no series",
querierSeries: [][]Series{{}, {}, {}, {}, {}, {}, {}},
expected: NewMockSeriesSet(),
},
{
-name: "1 querier, two series",
+name: "mix of queriers with no series",
+primaryQuerierSeries: []Series{},
+querierSeries: [][]Series{{}, {}, {}, {}, {}, {}, {}},
+expected: NewMockSeriesSet(),
+},
+// Test rest of cases on secondary queriers as the difference between primary vs secondary is just error handling.
+{
+name: "one querier, two series",
querierSeries: [][]Series{{
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
@@ -84,7 +100,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "2 queriers, 1 different series each",
+name: "two queriers, one different series each",
querierSeries: [][]Series{{
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}, sample{3, 3}}),
}, {
@@ -96,7 +112,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "2 time unsorted queriers, 2 series each",
+name: "two time unsorted queriers, two series each",
querierSeries: [][]Series{{
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
@@ -116,7 +132,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "5 queriers, only 2 queriers have 2 time unsorted series each",
+name: "five queriers, only two queriers have two time unsorted series each",
querierSeries: [][]Series{{}, {}, {
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
@@ -136,7 +152,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "2 queriers, only 2 queriers have 2 time unsorted series each, with 3 noop and one nil querier together",
+name: "two queriers, only two queriers have two time unsorted series each, with 3 noop and one nil querier together",
querierSeries: [][]Series{{}, {}, {
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}, sample{6, 6}}),
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
@@ -157,7 +173,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "2 queriers, with 2 series, one is overlapping",
+name: "two queriers, with two series, one is overlapping",
querierSeries: [][]Series{{}, {}, {
NewListSeries(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{2, 21}, sample{3, 31}, sample{5, 5}, sample{6, 6}}),
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
@@ -177,7 +193,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
),
},
{
-name: "2 queries, one with NaN samples series",
+name: "two queries, one with NaN samples series",
querierSeries: [][]Series{{
NewListSeries(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, math.NaN()}}),
}, {
@@ -189,13 +205,17 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
+var p Querier
+if tc.primaryQuerierSeries != nil {
+p = &mockQuerier{toReturn: tc.primaryQuerierSeries}
+}
var qs []Querier
for _, in := range tc.querierSeries {
qs = append(qs, &mockQuerier{toReturn: in})
}
qs = append(qs, tc.extraQueriers...)
-mergedQuerier, _, _ := NewMergeQuerier(qs[0], qs, ChainedSeriesMerge).Select(false, nil)
+mergedQuerier := NewMergeQuerier(p, qs, ChainedSeriesMerge).Select(false, nil)
// Get all merged series upfront to make sure there are no incorrectly retained shared
// buffers causing bugs.
@@ -223,21 +243,34 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
for _, tc := range []struct {
name string
+primaryChkQuerierSeries []ChunkSeries
chkQuerierSeries [][]ChunkSeries
extraQueriers []ChunkQuerier
expected ChunkSeriesSet
}{
{
-name: "one querier with no series",
+name: "one primary querier with no series",
+primaryChkQuerierSeries: []ChunkSeries{},
+expected: NewMockChunkSeriesSet(),
+},
+{
+name: "one secondary querier with no series",
chkQuerierSeries: [][]ChunkSeries{{}},
expected: NewMockChunkSeriesSet(),
},
{
-name: "many queriers with no series",
+name: "many secondary queriers with no series",
chkQuerierSeries: [][]ChunkSeries{{}, {}, {}, {}, {}, {}, {}},
expected: NewMockChunkSeriesSet(),
},
+{
+name: "mix of queriers with no series",
+primaryChkQuerierSeries: []ChunkSeries{},
+chkQuerierSeries: [][]ChunkSeries{{}, {}, {}, {}, {}, {}, {}},
+expected: NewMockChunkSeriesSet(),
+},
+// Test rest of cases on secondary queriers as the difference between primary vs secondary is just error handling.
{
name: "one querier, two series",
chkQuerierSeries: [][]ChunkSeries{{
@@ -250,7 +283,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
),
},
{
-name: "two queriers, one different series each",
+name: "two secondaries, one different series each",
chkQuerierSeries: [][]ChunkSeries{{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{1, 1}, sample{2, 2}}, []tsdbutil.Sample{sample{3, 3}}),
}, {
@@ -262,7 +295,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
),
},
{
-name: "two queriers, two not in time order series each",
+name: "two secondaries, two not in time order series each",
chkQuerierSeries: [][]ChunkSeries{{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
@@ -286,7 +319,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
),
},
{
-name: "five queriers, only two have two not in time order series each",
+name: "five secondaries, only two have two not in time order series each",
chkQuerierSeries: [][]ChunkSeries{{}, {}, {
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
@@ -310,7 +343,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
),
},
{
-name: "two queriers, with two not in time order series each, with 3 noop queries and one nil together",
+name: "two secondaries, with two not in time order series each, with 3 noop queries and one nil together",
chkQuerierSeries: [][]ChunkSeries{{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), []tsdbutil.Sample{sample{5, 5}}, []tsdbutil.Sample{sample{6, 6}}),
NewListChunkSeriesFromSamples(labels.FromStrings("foo", "bar"), []tsdbutil.Sample{sample{0, 0}, sample{1, 1}}, []tsdbutil.Sample{sample{2, 2}}),
@@ -347,13 +380,18 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
+var p ChunkQuerier
+if tc.primaryChkQuerierSeries != nil {
+p = &mockChunkQurier{toReturn: tc.primaryChkQuerierSeries}
+}
var qs []ChunkQuerier
for _, in := range tc.chkQuerierSeries {
qs = append(qs, &mockChunkQurier{toReturn: in})
}
qs = append(qs, tc.extraQueriers...)
-merged, _, _ := NewMergeChunkQuerier(qs[0], qs, NewVerticalChunkSeriesMerger(nil)).Select(false, nil)
+merged := NewMergeChunkQuerier(p, qs, NewCompactingChunkSeriesMerger(nil)).Select(false, nil)
for merged.Next() {
testutil.Assert(t, tc.expected.Next(), "Expected Next() to be true")
actualSeries := merged.At()
@@ -373,7 +411,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
}
type mockQuerier struct {
-baseQuerier
+LabelQuerier
toReturn []Series
}
@@ -384,18 +422,18 @@ func (a seriesByLabel) Len() int { return len(a) }
func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
-func (m *mockQuerier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) (SeriesSet, Warnings, error) {
+func (m *mockQuerier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet {
cpy := make([]Series, len(m.toReturn))
copy(cpy, m.toReturn)
if sortSeries {
sort.Sort(seriesByLabel(cpy))
}
-return NewMockSeriesSet(cpy...), nil, nil
+return NewMockSeriesSet(cpy...)
}
type mockChunkQurier struct {
-baseQuerier
+LabelQuerier
toReturn []ChunkSeries
}
@@ -408,14 +446,14 @@ func (a chunkSeriesByLabel) Less(i, j int) bool {
return labels.Compare(a[i].Labels(), a[j].Labels()) < 0
}
-func (m *mockChunkQurier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) (ChunkSeriesSet, Warnings, error) {
+func (m *mockChunkQurier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet {
cpy := make([]ChunkSeries, len(m.toReturn))
copy(cpy, m.toReturn)
if sortSeries {
sort.Sort(chunkSeriesByLabel(cpy))
}
-return NewMockChunkSeriesSet(cpy...), nil, nil
+return NewMockChunkSeriesSet(cpy...)
}
type mockSeriesSet struct {
@@ -439,6 +477,8 @@ func (m *mockSeriesSet) At() Series { return m.series[m.idx] }
func (m *mockSeriesSet) Err() error { return nil }
+func (m *mockSeriesSet) Warnings() Warnings { return nil }
type mockChunkSeriesSet struct {
idx int
series []ChunkSeries
@@ -460,6 +500,8 @@ func (m *mockChunkSeriesSet) At() ChunkSeries { return m.series[m.idx] }
func (m *mockChunkSeriesSet) Err() error { return nil }
+func (m *mockChunkSeriesSet) Warnings() Warnings { return nil }
func TestChainSampleIterator(t *testing.T) {
for _, tc := range []struct {
input []chunkenc.Iterator
@@ -467,22 +509,22 @@ func TestChainSampleIterator(t *testing.T) {
}{
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{1, 1}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
},
expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}},
},
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{1, 1}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{2, 2}, sample{3, 3}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
},
expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}},
},
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{3, 3}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{1, 1}, sample{4, 4}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{2, 2}, sample{5, 5}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{3, 3}}),
+NewListSeriesIterator(samples{sample{1, 1}, sample{4, 4}}),
+NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
},
expected: []tsdbutil.Sample{
sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}},
@@ -490,12 +532,12 @@ func TestChainSampleIterator(t *testing.T) {
// Overlap.
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{1, 1}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{2, 2}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{2, 2}, sample{3, 3}}),
-NewListSeriesIterator([]tsdbutil.Sample{}),
-NewListSeriesIterator([]tsdbutil.Sample{}),
-NewListSeriesIterator([]tsdbutil.Sample{}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{2, 2}}),
+NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
+NewListSeriesIterator(samples{}),
+NewListSeriesIterator(samples{}),
+NewListSeriesIterator(samples{}),
},
expected: []tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}, sample{3, 3}},
},
@@ -515,24 +557,24 @@ func TestChainSampleIteratorSeek(t *testing.T) {
}{
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}, sample{2, 2}}),
},
seek: 1,
expected: []tsdbutil.Sample{sample{1, 1}, sample{2, 2}},
},
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{1, 1}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{2, 2}, sample{3, 3}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{1, 1}}),
+NewListSeriesIterator(samples{sample{2, 2}, sample{3, 3}}),
},
seek: 2,
expected: []tsdbutil.Sample{sample{2, 2}, sample{3, 3}},
},
{
input: []chunkenc.Iterator{
-NewListSeriesIterator([]tsdbutil.Sample{sample{0, 0}, sample{3, 3}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{1, 1}, sample{4, 4}}),
-NewListSeriesIterator([]tsdbutil.Sample{sample{2, 2}, sample{5, 5}}),
+NewListSeriesIterator(samples{sample{0, 0}, sample{3, 3}}),
+NewListSeriesIterator(samples{sample{1, 1}, sample{4, 4}}),
+NewListSeriesIterator(samples{sample{2, 2}, sample{5, 5}}),
},
seek: 2,
expected: []tsdbutil.Sample{sample{2, 2}, sample{3, 3}, sample{4, 4}, sample{5, 5}},
@@ -571,7 +613,7 @@ func makeMergeSeriesSet(numSeriesSets, numSeries, numSamples int) SeriesSet {
for i := 0; i < numSeriesSets; i++ {
seriesSets = append(seriesSets, &genericSeriesSetAdapter{makeSeriesSet(numSeries, numSamples)})
}
-return &seriesSetAdapter{newGenericMergeSeriesSet(seriesSets, nil, (&seriesMergerAdapter{VerticalSeriesMergeFunc: ChainedSeriesMerge}).Merge)}
+return &seriesSetAdapter{newGenericMergeSeriesSet(seriesSets, (&seriesMergerAdapter{VerticalSeriesMergeFunc: ChainedSeriesMerge}).Merge)}
}
func benchmarkDrain(seriesSet SeriesSet, b *testing.B) {
@@ -603,3 +645,260 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
})
}
}
+type mockGenericQuerier struct {
+mtx sync.Mutex
closed bool
labelNamesCalls int
labelNamesRequested []string
sortedSeriesRequested []bool
resp []string
warnings Warnings
err error
}
func (m *mockGenericQuerier) Select(b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet {
m.mtx.Lock()
m.sortedSeriesRequested = append(m.sortedSeriesRequested, b)
m.mtx.Unlock()
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
}
func (m *mockGenericQuerier) LabelValues(name string) ([]string, Warnings, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, name)
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockGenericQuerier) LabelNames() ([]string, Warnings, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockGenericQuerier) Close() error {
m.closed = true
return nil
}
type mockGenericSeriesSet struct {
resp []string
warnings Warnings
err error
curr int
}
func (m *mockGenericSeriesSet) Next() bool {
if m.err != nil {
return false
}
if m.curr >= len(m.resp) {
return false
}
m.curr++
return true
}
func (m *mockGenericSeriesSet) Err() error { return m.err }
func (m *mockGenericSeriesSet) Warnings() Warnings { return m.warnings }
func (m *mockGenericSeriesSet) At() Labels {
return mockLabels(m.resp[m.curr-1])
}
type mockLabels string
func (l mockLabels) Labels() labels.Labels {
return labels.FromStrings("test", string(l))
}
func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier {
m, ok := qr.(*mockGenericQuerier)
if !ok {
s, ok := qr.(*secondaryQuerier)
testutil.Assert(t, ok, "expected secondaryQuerier got something else")
m, ok = s.genericQuerier.(*mockGenericQuerier)
testutil.Assert(t, ok, "expected mockGenericQuerier got something else")
}
return m
}
func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
var (
errStorage = errors.New("storage error")
warnStorage = errors.New("storage warning")
)
for _, tcase := range []struct {
name string
queriers []genericQuerier
expectedSelectsSeries []labels.Labels
expectedLabels []string
expectedWarnings [3]Warnings
expectedErrs [3]error
}{
{},
{
name: "one successful primary querier",
queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"),
},
expectedLabels: []string{"a", "b"},
},
{
name: "multiple successful primary queriers",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
&mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"),
labels.FromStrings("test", "c"),
},
expectedLabels: []string{"a", "b", "c"},
},
{
name: "one failed primary querier",
queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}},
expectedErrs: [3]error{errStorage, errStorage, errStorage},
},
{
name: "one successful primary querier with successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"),
labels.FromStrings("test", "c"),
},
expectedLabels: []string{"a", "b", "c"},
},
{
name: "one successful primary querier with empty response and successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "b"),
labels.FromStrings("test", "c"),
},
expectedLabels: []string{"b", "c"},
},
{
name: "one failed primary querier with successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{warnings: nil, err: errStorage},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
},
expectedErrs: [3]error{errStorage, errStorage, errStorage},
},
{
name: "one successful primary querier with failed secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
},
expectedLabels: []string{"a"},
expectedWarnings: [3]Warnings{
[]error{errStorage, errStorage},
[]error{errStorage, errStorage},
[]error{errStorage, errStorage},
},
},
{
name: "successful queriers with warnings",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a"}, warnings: []error{warnStorage}, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: []error{warnStorage}, err: nil}},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"),
},
expectedLabels: []string{"a", "b"},
expectedWarnings: [3]Warnings{
[]error{warnStorage, warnStorage},
[]error{warnStorage, warnStorage},
[]error{warnStorage, warnStorage},
},
},
} {
t.Run(tcase.name, func(t *testing.T) {
q := &mergeGenericQuerier{
queriers: tcase.queriers,
mergeFn: func(l ...Labels) Labels { return l[0] },
}
t.Run("Select", func(t *testing.T) {
res := q.Select(false, nil)
var lbls []labels.Labels
for res.Next() {
lbls = append(lbls, res.At().Labels())
}
testutil.Equals(t, tcase.expectedWarnings[0], res.Warnings())
testutil.Equals(t, tcase.expectedErrs[0], res.Err())
testutil.Assert(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match")
testutil.Equals(t, tcase.expectedSelectsSeries, lbls)
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
exp := []bool{true}
if len(q.queriers) == 1 {
exp[0] = false
}
testutil.Equals(t, exp, m.sortedSeriesRequested)
}
})
t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames()
testutil.Equals(t, tcase.expectedWarnings[1], w)
testutil.Assert(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match")
testutil.Equals(t, tcase.expectedLabels, res)
if err != nil {
return
}
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
testutil.Equals(t, 1, m.labelNamesCalls)
}
})
t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues("test")
testutil.Equals(t, tcase.expectedWarnings[2], w)
testutil.Assert(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match")
testutil.Equals(t, tcase.expectedLabels, res)
if err != nil {
return
}
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
testutil.Equals(t, []string{"test"}, m.labelNamesRequested)
}
})
})
}
}
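
The table above encodes the intended failure semantics of the merged querier: an error from a primary querier fails the whole set, while secondary failures surface only as warnings. A minimal sketch of the consumption pattern the assertions rely on (drainSeriesSet is a hypothetical helper, not part of this change):

```go
package storage

import "github.com/prometheus/prometheus/pkg/labels"

// drainSeriesSet mirrors the consumption pattern asserted above: iterate the
// set to completion first, then read Warnings() and Err() from the set itself.
func drainSeriesSet(ss SeriesSet) ([]labels.Labels, Warnings, error) {
	var lbls []labels.Labels
	for ss.Next() {
		lbls = append(lbls, ss.At().Labels())
	}
	return lbls, ss.Warnings(), ss.Err()
}
```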

View file

@@ -19,14 +19,15 @@ package storage
import "github.com/prometheus/prometheus/pkg/labels"

type genericQuerier interface {
-   baseQuerier
-   Select(bool, *SelectHints, ...*labels.Matcher) (genericSeriesSet, Warnings, error)
+   LabelQuerier
+   Select(bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
}

type genericSeriesSet interface {
    Next() bool
    At() Labels
    Err() error
+   Warnings() Warnings
}

type genericSeriesMergeFunc func(...Labels) Labels

@@ -48,28 +49,26 @@ func (a *genericChunkSeriesSetAdapter) At() Labels {
}

type genericQuerierAdapter struct {
-   baseQuerier
+   LabelQuerier

    // One-of. If both are set, Querier will be used.
    q  Querier
    cq ChunkQuerier
}

-func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (genericSeriesSet, Warnings, error) {
+func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
    if q.q != nil {
-       s, w, err := q.q.Select(sortSeries, hints, matchers...)
-       return &genericSeriesSetAdapter{s}, w, err
+       return &genericSeriesSetAdapter{q.q.Select(sortSeries, hints, matchers...)}
    }
-   s, w, err := q.cq.Select(sortSeries, hints, matchers...)
-   return &genericChunkSeriesSetAdapter{s}, w, err
+   return &genericChunkSeriesSetAdapter{q.cq.Select(sortSeries, hints, matchers...)}
}

func newGenericQuerierFrom(q Querier) genericQuerier {
-   return &genericQuerierAdapter{baseQuerier: q, q: q}
+   return &genericQuerierAdapter{LabelQuerier: q, q: q}
}

func newGenericQuerierFromChunk(cq ChunkQuerier) genericQuerier {
-   return &genericQuerierAdapter{baseQuerier: cq, cq: cq}
+   return &genericQuerierAdapter{LabelQuerier: cq, cq: cq}
}

type querierAdapter struct {

@@ -84,9 +83,8 @@ func (a *seriesSetAdapter) At() Series {
    return a.genericSeriesSet.At().(Series)
}

-func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (SeriesSet, Warnings, error) {
-   s, w, err := q.genericQuerier.Select(sortSeries, hints, matchers...)
-   return &seriesSetAdapter{s}, w, err
+func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
+   return &seriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
}

type chunkQuerierAdapter struct {

@@ -101,9 +99,8 @@ func (a *chunkSeriesSetAdapter) At() ChunkSeries {
    return a.genericSeriesSet.At().(ChunkSeries)
}

-func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (ChunkSeriesSet, Warnings, error) {
-   s, w, err := q.genericQuerier.Select(sortSeries, hints, matchers...)
-   return &chunkSeriesSetAdapter{s}, w, err
+func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
+   return &chunkSeriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
}

type seriesMergerAdapter struct {

@@ -119,7 +116,7 @@ func (a *seriesMergerAdapter) Merge(s ...Labels) Labels {
}

type chunkSeriesMergerAdapter struct {
-   VerticalChunkSeriesMergerFunc
+   VerticalChunkSeriesMergeFunc
}

func (a *chunkSeriesMergerAdapter) Merge(s ...Labels) Labels {

@@ -127,5 +124,15 @@ func (a *chunkSeriesMergerAdapter) Merge(s ...Labels) Labels {
    for _, ser := range s {
        buf = append(buf, ser.(ChunkSeries))
    }
-   return a.VerticalChunkSeriesMergerFunc(buf...)
+   return a.VerticalChunkSeriesMergeFunc(buf...)
}
type noopGenericSeriesSet struct{}
func (noopGenericSeriesSet) Next() bool { return false }
func (noopGenericSeriesSet) At() Labels { return nil }
func (noopGenericSeriesSet) Err() error { return nil }
func (noopGenericSeriesSet) Warnings() Warnings { return nil }
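
For orientation, the adapters above let one code path serve both Querier and ChunkQuerier; with warnings moved onto the set, the wrapping composes without threading (Warnings, error) through every call. A small sketch using the unexported names from this file (the helper itself is hypothetical):

```go
package storage

import "github.com/prometheus/prometheus/pkg/labels"

// sortedSelect wraps a Querier as a genericQuerier, selects through it, and
// unwraps the generic set back into a SeriesSet. Errors and warnings travel
// inside the returned set rather than as extra return values.
func sortedSelect(q Querier, matchers ...*labels.Matcher) SeriesSet {
	gq := newGenericQuerierFrom(q)
	return &seriesSetAdapter{gq.Select(true, nil, matchers...)}
}
```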

View file

@@ -37,11 +37,16 @@ type Appendable interface {
    Appender() Appender
}

+// SampleAndChunkQueryable allows retrieving samples as well as encoded samples in form of chunks.
+type SampleAndChunkQueryable interface {
+   Queryable
+   ChunkQueryable
+}
+
// Storage ingests and manages samples, along with various indexes. All methods
// are goroutine-safe. Storage implements storage.SampleAppender.
-// TODO(bwplotka): Add ChunkQueryable to Storage in next PR.
type Storage interface {
-   Queryable
+   SampleAndChunkQueryable
    Appendable

    // StartTime returns the oldest timestamp stored in the storage.

@@ -60,32 +65,33 @@ type Queryable interface {

// Querier provides querying access over time series data of a fixed time range.
type Querier interface {
-   baseQuerier
+   LabelQuerier

    // Select returns a set of series that matches the given label matchers.
    // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
    // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-   Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (SeriesSet, Warnings, error)
+   Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}

// A ChunkQueryable handles queries against a storage.
// Use it when you need to have access to samples in encoded format.
type ChunkQueryable interface {
    // ChunkQuerier returns a new ChunkQuerier on the storage.
-   ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, Warnings, error)
+   ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error)
}

// ChunkQuerier provides querying access over time series data of a fixed time range.
type ChunkQuerier interface {
-   baseQuerier
+   LabelQuerier

    // Select returns a set of series that matches the given label matchers.
    // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
    // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-   Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) (ChunkSeriesSet, Warnings, error)
+   Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet
}

-type baseQuerier interface {
+// LabelQuerier provides querying access over labels.
+type LabelQuerier interface {
    // LabelValues returns all potential values for a label name.
    // It is not safe to use the strings beyond the lifetime of the querier.
    LabelValues(name string) ([]string, Warnings, error)

@@ -111,6 +117,7 @@ type SelectHints struct {
    Range int64 // Range vector selector range in milliseconds.
}

+// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error)

@@ -153,7 +160,12 @@ type Appender interface {
type SeriesSet interface {
    Next() bool
    At() Series
+   // The error that iteration has failed with.
+   // When an error occurs, the set cannot continue to iterate.
    Err() error
+   // A collection of warnings for the whole set.
+   // Warnings can be returned even when iteration has not failed with an error.
+   Warnings() Warnings
}

var emptySeriesSet = errSeriesSet{}

@@ -170,6 +182,33 @@ type errSeriesSet struct {
func (s errSeriesSet) Next() bool { return false }
func (s errSeriesSet) At() Series { return nil }
func (s errSeriesSet) Err() error { return s.err }
+func (s errSeriesSet) Warnings() Warnings { return nil }
// ErrSeriesSet returns a series set that wraps an error.
func ErrSeriesSet(err error) SeriesSet {
return errSeriesSet{err: err}
}
var emptyChunkSeriesSet = errChunkSeriesSet{}
// EmptyChunkSeriesSet returns a chunk series set that's always empty.
func EmptyChunkSeriesSet() ChunkSeriesSet {
return emptyChunkSeriesSet
}
type errChunkSeriesSet struct {
err error
}
func (s errChunkSeriesSet) Next() bool { return false }
func (s errChunkSeriesSet) At() ChunkSeries { return nil }
func (s errChunkSeriesSet) Err() error { return s.err }
func (s errChunkSeriesSet) Warnings() Warnings { return nil }
// ErrChunkSeriesSet returns a chunk series set that wraps an error.
func ErrChunkSeriesSet(err error) ChunkSeriesSet {
return errChunkSeriesSet{err: err}
}
// Series exposes a single time series and allows iterating over samples.
type Series interface {

@@ -181,7 +220,12 @@ type Series interface {
type ChunkSeriesSet interface {
    Next() bool
    At() ChunkSeries
+   // The error that iteration has failed with.
+   // When an error occurs, the set cannot continue to iterate.
    Err() error
+   // A collection of warnings for the whole set.
+   // Warnings can be returned even when iteration has not failed with an error.
+   Warnings() Warnings
}

// ChunkSeries exposes a single time series and allows iterating over chunks.
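
The net effect of this interface change is visible at call sites: Select no longer returns (SeriesSet, Warnings, error), so errors and warnings are read from the set after iteration. A minimal sketch of the new calling convention (queryOnce is a hypothetical helper):

```go
package storage

import "context"

// queryOnce opens a Querier, drains one Select, and surfaces the set's
// warnings and error only after iteration has finished.
func queryOnce(ctx context.Context, able Queryable, mint, maxt int64) (Warnings, error) {
	q, err := able.Querier(ctx, mint, maxt)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	ss := q.Select(false, nil)
	for ss.Next() {
		_ = ss.At() // Consume each series.
	}
	return ss.Warnings(), ss.Err()
}
```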

storage/lazy.go (new file, 67 lines)
View file

@ -0,0 +1,67 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
type lazySeriesSet struct {
create func() (s genericSeriesSet, ok bool)
set genericSeriesSet
}
func (c *lazySeriesSet) Next() bool {
if c.set != nil {
return c.set.Next()
}
var ok bool
c.set, ok = c.create()
return ok
}
func (c *lazySeriesSet) Err() error {
if c.set != nil {
return c.set.Err()
}
return nil
}
func (c *lazySeriesSet) At() Labels {
if c.set != nil {
return c.set.At()
}
return nil
}
func (c *lazySeriesSet) Warnings() Warnings {
if c.set != nil {
return c.set.Warnings()
}
return nil
}
type warningsOnlySeriesSet Warnings
func (warningsOnlySeriesSet) Next() bool { return false }
func (warningsOnlySeriesSet) Err() error { return nil }
func (warningsOnlySeriesSet) At() Labels { return nil }
func (c warningsOnlySeriesSet) Warnings() Warnings { return Warnings(c) }
type errorOnlySeriesSet struct {
err error
}
func (errorOnlySeriesSet) Next() bool { return false }
func (errorOnlySeriesSet) At() Labels { return nil }
func (s errorOnlySeriesSet) Err() error { return s.err }
func (errorOnlySeriesSet) Warnings() Warnings { return nil }
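
lazySeriesSet defers the create callback until the first Next call; the callback must position the underlying set on its first element and report via ok whether there is anything to iterate. A sketch of that contract (newLazySet and the channel delivery are illustrative assumptions, not part of the change):

```go
package storage

// newLazySet defers potentially expensive work (e.g. an in-flight remote
// request delivered on ch) until the first Next call. The create callback
// advances the received set to its first element; ok=false ends iteration
// immediately, leaving Err and Warnings readable from the set.
func newLazySet(ch chan genericSeriesSet) genericSeriesSet {
	return &lazySeriesSet{create: func() (genericSeriesSet, bool) {
		set := <-ch
		return set, set.Next()
	}}
}
```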

View file

@@ -24,8 +24,8 @@ func NoopQuerier() Querier {
    return noopQuerier{}
}

-func (noopQuerier) Select(bool, *SelectHints, ...*labels.Matcher) (SeriesSet, Warnings, error) {
-   return NoopSeriesSet(), nil, nil
+func (noopQuerier) Select(bool, *SelectHints, ...*labels.Matcher) SeriesSet {
+   return NoopSeriesSet()
}

func (noopQuerier) LabelValues(string) ([]string, Warnings, error) {

@@ -47,8 +47,8 @@ func NoopChunkedQuerier() ChunkQuerier {
    return noopChunkQuerier{}
}

-func (noopChunkQuerier) Select(bool, *SelectHints, ...*labels.Matcher) (ChunkSeriesSet, Warnings, error) {
-   return NoopChunkedSeriesSet(), nil, nil
+func (noopChunkQuerier) Select(bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet {
+   return NoopChunkedSeriesSet()
}

func (noopChunkQuerier) LabelValues(string) ([]string, Warnings, error) {

@@ -76,6 +76,8 @@ func (noopSeriesSet) At() Series { return nil }
func (noopSeriesSet) Err() error { return nil }

+func (noopSeriesSet) Warnings() Warnings { return nil }
+
type noopChunkedSeriesSet struct{}

// NoopChunkedSeriesSet is a ChunkSeriesSet that does nothing.

@@ -88,3 +90,5 @@ func (noopChunkedSeriesSet) Next() bool { return false }
func (noopChunkedSeriesSet) At() ChunkSeries { return nil }
func (noopChunkedSeriesSet) Err() error { return nil }
+
+func (noopChunkedSeriesSet) Warnings() Warnings { return nil }
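
The noop implementations are handy as safe defaults now that every set must also answer Warnings(); a trivial sketch (safeQuerier is hypothetical):

```go
package storage

// safeQuerier falls back to a querier that yields empty results, nil
// warnings, and nil errors instead of propagating a nil interface.
func safeQuerier(q Querier) Querier {
	if q == nil {
		return NoopQuerier()
	}
	return q
}
```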

View file

@@ -21,6 +21,7 @@ import (
    "io"
    "io/ioutil"
    "net/http"
+   "strconv"
    "strings"
    "time"

@@ -28,6 +29,7 @@ import (
    "github.com/golang/snappy"
    "github.com/opentracing/opentracing-go"
    "github.com/pkg/errors"
+   "github.com/prometheus/client_golang/prometheus"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
    "github.com/prometheus/common/version"

@@ -40,23 +42,86 @@ const maxErrMsgLen = 256

var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)

-// Client allows reading and writing from/to a remote HTTP endpoint.
-type Client struct {
+var (
+   remoteReadQueriesTotal = prometheus.NewCounterVec(
+       prometheus.CounterOpts{
+           Namespace: namespace,
+           Subsystem: subsystem,
+           Name:      "read_queries_total",
+           Help:      "The total number of remote read queries.",
+       },
+       []string{remoteName, endpoint, "code"},
+   )
+   remoteReadQueries = prometheus.NewGaugeVec(
+       prometheus.GaugeOpts{
+           Namespace: namespace,
+           Subsystem: subsystem,
+           Name:      "remote_read_queries",
+           Help:      "The number of in-flight remote read queries.",
+       },
+       []string{remoteName, endpoint},
+   )
+   remoteReadQueryDuration = prometheus.NewHistogramVec(
+       prometheus.HistogramOpts{
+           Namespace: namespace,
+           Subsystem: subsystem,
+           Name:      "read_request_duration_seconds",
+           Help:      "Histogram of the latency for remote read requests.",
+           Buckets:   append(prometheus.DefBuckets, 25, 60),
+       },
+       []string{remoteName, endpoint},
+   )
+)
+
+func init() {
+   prometheus.MustRegister(remoteReadQueriesTotal, remoteReadQueries, remoteReadQueryDuration)
+}
+
+// client allows reading and writing from/to a remote HTTP endpoint.
+type client struct {
    remoteName string // Used to differentiate clients in metrics.
    url        *config_util.URL
    client     *http.Client
    timeout    time.Duration
+
+   readQueries         prometheus.Gauge
+   readQueriesTotal    *prometheus.CounterVec
+   readQueriesDuration prometheus.Observer
}

-// ClientConfig configures a Client.
+// ClientConfig configures a client.
type ClientConfig struct {
    URL              *config_util.URL
    Timeout          model.Duration
    HTTPClientConfig config_util.HTTPClientConfig
}

-// NewClient creates a new Client.
-func NewClient(remoteName string, conf *ClientConfig) (*Client, error) {
+// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
+// TODO(bwplotka): Add streamed chunked remote read method as well (https://github.com/prometheus/prometheus/issues/5926).
+type ReadClient interface {
+   Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error)
+}
+
+// newReadClient creates a new client for remote read.
+func newReadClient(name string, conf *ClientConfig) (ReadClient, error) {
+   httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage", false)
+   if err != nil {
+       return nil, err
+   }
+
+   return &client{
+       remoteName:          name,
+       url:                 conf.URL,
+       client:              httpClient,
+       timeout:             time.Duration(conf.Timeout),
+       readQueries:         remoteReadQueries.WithLabelValues(name, conf.URL.String()),
+       readQueriesTotal:    remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
+       readQueriesDuration: remoteReadQueryDuration.WithLabelValues(name, conf.URL.String()),
+   }, nil
+}
+
+// NewWriteClient creates a new client for remote write.
+func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
    httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage", false)
    if err != nil {
        return nil, err

@@ -67,8 +132,8 @@ func NewClient(remoteName string, conf *ClientConfig) (*Client, error) {
        RoundTripper: t,
    }

-   return &Client{
-       remoteName: remoteName,
+   return &client{
+       remoteName: name,
        url:        conf.URL,
        client:     httpClient,
        timeout:    time.Duration(conf.Timeout),

@@ -81,7 +146,7 @@ type recoverableError struct {

// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go.
-func (c *Client) Store(ctx context.Context, req []byte) error {
+func (c *client) Store(ctx context.Context, req []byte) error {
    httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
    if err != nil {
        // Errors from NewRequest are from unparsable URLs, so are not

@@ -134,17 +199,20 @@ }
}

// Name uniquely identifies the client.
-func (c Client) Name() string {
+func (c client) Name() string {
    return c.remoteName
}

// Endpoint is the remote read or write endpoint.
-func (c Client) Endpoint() string {
+func (c client) Endpoint() string {
    return c.url.String()
}

// Read reads from a remote endpoint.
-func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
+func (c *client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
+   c.readQueries.Inc()
+   defer c.readQueries.Dec()
+
    req := &prompb.ReadRequest{
        // TODO: Support batching multiple queries into one read request,
        // as the protobuf interface allows for it.

@@ -184,6 +252,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
        defer ht.Finish()
    }

+   start := time.Now()
    httpResp, err := c.client.Do(httpReq)
    if err != nil {
        return nil, errors.Wrap(err, "error sending request")

@@ -192,6 +261,8 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
        io.Copy(ioutil.Discard, httpResp.Body)
        httpResp.Body.Close()
    }()
+   c.readQueriesDuration.Observe(time.Since(start).Seconds())
+   c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc()

    compressed, err = ioutil.ReadAll(httpResp.Body)
    if err != nil {
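
The read metrics above follow a curry-once pattern: the remote_name and url label values are bound when the client is built, so per-request code only fills in the status code. A sketch of that per-request side (observeRead is hypothetical; the field names are from the diff):

```go
package remote

// observeRead shows the per-request side of the curried read metrics: the
// histogram records latency and the counter is completed with the response
// code; remote_name and url were already bound at client construction.
func observeRead(c *client, seconds float64, statusCode string) {
	c.readQueriesDuration.Observe(seconds)
	c.readQueriesTotal.WithLabelValues(statusCode).Inc()
}
```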

View file

@@ -25,7 +25,6 @@ import (
    "github.com/pkg/errors"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
-
    "github.com/prometheus/prometheus/util/testutil"
)

@@ -71,7 +70,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
    hash, err := toHash(conf)
    testutil.Ok(t, err)

-   c, err := NewClient(hash, conf)
+   c, err := NewWriteClient(hash, conf)
    testutil.Ok(t, err)

    err = c.Store(context.Background(), []byte{})

View file

@@ -107,7 +107,7 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHi
}

// ToQueryResult builds a QueryResult proto.
-func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, error) {
+func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) {
    numSamples := 0
    resp := &prompb.QueryResult{}
    for ss.Next() {

@@ -118,7 +118,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
        for iter.Next() {
            numSamples++
            if sampleLimit > 0 && numSamples > sampleLimit {
-               return nil, HTTPError{
+               return nil, ss.Warnings(), HTTPError{
                    msg:    fmt.Sprintf("exceeded sample limit (%d)", sampleLimit),
                    status: http.StatusBadRequest,
                }

@@ -130,7 +130,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
            })
        }
        if err := iter.Err(); err != nil {
-           return nil, err
+           return nil, ss.Warnings(), err
        }

        resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{

@@ -138,25 +138,18 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
            Samples: samples,
        })
    }
-   if err := ss.Err(); err != nil {
-       return nil, err
-   }
-   return resp, nil
+   return resp, ss.Warnings(), ss.Err()
}

// FromQueryResult unpacks and sorts a QueryResult proto.
func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet {
    series := make([]storage.Series, 0, len(res.Timeseries))
    for _, ts := range res.Timeseries {
-       labels := labelProtosToLabels(ts.Labels)
-       if err := validateLabelsAndMetricName(labels); err != nil {
+       lbls := labelProtosToLabels(ts.Labels)
+       if err := validateLabelsAndMetricName(lbls); err != nil {
            return errSeriesSet{err: err}
        }
-
-       series = append(series, &concreteSeries{
-           labels:  labels,
-           samples: ts.Samples,
-       })
+       series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples})
    }

    if sortSeries {

@@ -187,15 +180,14 @@ func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.R
    return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported)
}

-// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller.
-// TODO(bwplotka): Encode only what's needed. Fetch the encoded series from blocks instead of re-encoding everything.
-func StreamChunkedReadResponses(
+// TODO(bwlpotka): Remove when tsdb will support ChunkQuerier.
+func DeprecatedStreamChunkedReadResponses(
    stream io.Writer,
    queryIndex int64,
    ss storage.SeriesSet,
    sortedExternalLabels []prompb.Label,
    maxBytesInFrame int,
-) error {
+) (storage.Warnings, error) {
    var (
        chks []prompb.Chunk
        lbls []prompb.Label

@@ -218,7 +210,7 @@ func StreamChunkedReadResponses(
            // TODO(bwplotka): Use ChunkIterator once available in TSDB instead of re-encoding: https://github.com/prometheus/prometheus/pull/5882
            chks, err = encodeChunks(iter, chks, maxBytesInFrame-lblsSize)
            if err != nil {
-               return err
+               return ss.Warnings(), err
            }

            if len(chks) == 0 {

@@ -234,25 +226,25 @@ func StreamChunkedReadResponses(
                QueryIndex: queryIndex,
            })
            if err != nil {
-               return errors.Wrap(err, "marshal ChunkedReadResponse")
+               return ss.Warnings(), errors.Wrap(err, "marshal ChunkedReadResponse")
            }

            if _, err := stream.Write(b); err != nil {
-               return errors.Wrap(err, "write to stream")
+               return ss.Warnings(), errors.Wrap(err, "write to stream")
            }

            chks = chks[:0]
        }

        if err := iter.Err(); err != nil {
-           return err
+           return ss.Warnings(), err
        }
    }

    if err := ss.Err(); err != nil {
-       return err
+       return ss.Warnings(), err
    }

-   return nil
+   return ss.Warnings(), nil
}

// encodeChunks expects iterator to be ready to use (aka iter.Next() called before invoking).

@@ -315,6 +307,77 @@ func encodeChunks(iter chunkenc.Iterator, chks []prompb.Chunk, frameBytesLeft in
    return chks, nil
}
} }
// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller.
// It expects Series set with populated chunks.
func StreamChunkedReadResponses(
stream io.Writer,
queryIndex int64,
ss storage.ChunkSeriesSet,
sortedExternalLabels []prompb.Label,
maxBytesInFrame int,
) (storage.Warnings, error) {
var (
chks []prompb.Chunk
lbls []prompb.Label
)
for ss.Next() {
series := ss.At()
iter := series.Iterator()
lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
frameBytesLeft := maxBytesInFrame
for _, lbl := range lbls {
frameBytesLeft -= lbl.Size()
}
isNext := iter.Next()
// Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame.
for isNext {
chk := iter.At()
if chk.Chunk == nil {
return ss.Warnings(), errors.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref)
}
// Cut the chunk.
chks = append(chks, prompb.Chunk{
MinTimeMs: chk.MinTime,
MaxTimeMs: chk.MaxTime,
Type: prompb.Chunk_Encoding(chk.Chunk.Encoding()),
Data: chk.Chunk.Bytes(),
})
frameBytesLeft -= chks[len(chks)-1].Size()
// We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
isNext = iter.Next()
if frameBytesLeft > 0 && isNext {
continue
}
b, err := proto.Marshal(&prompb.ChunkedReadResponse{
ChunkedSeries: []*prompb.ChunkedSeries{
{Labels: lbls, Chunks: chks},
},
QueryIndex: queryIndex,
})
if err != nil {
return ss.Warnings(), errors.Wrap(err, "marshal ChunkedReadResponse")
}
if _, err := stream.Write(b); err != nil {
return ss.Warnings(), errors.Wrap(err, "write to stream")
}
chks = chks[:0]
}
if err := iter.Err(); err != nil {
return ss.Warnings(), err
}
}
return ss.Warnings(), ss.Err()
}
// MergeLabels merges two sets of sorted proto labels, preferring those in
// primary to those in secondary when there is an overlap.
func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {

@@ -365,6 +428,8 @@ func (e errSeriesSet) Err() error {
    return e.err
}

+func (e errSeriesSet) Warnings() storage.Warnings { return nil }
+
// concreteSeriesSet implements storage.SeriesSet.
type concreteSeriesSet struct {
    cur    int

@@ -384,6 +449,8 @@ func (c *concreteSeriesSet) Err() error {
    return nil
}

+func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil }
+
// concreteSeries implements storage.Series.
type concreteSeries struct {
    labels  labels.Labels
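
A hypothetical caller of the new chunk-based streamer above, showing how warnings now come back alongside the error so a handler can report partial failures without masking a hard error (streamQuery and the frame size are assumptions):

```go
package remote

import (
	"io"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage"
)

// streamQuery drains a ChunkSeriesSet into chunked read responses with
// roughly 1 MiB frames; warnings are surfaced separately from the error.
func streamQuery(w io.Writer, queryIndex int64, ss storage.ChunkSeriesSet, extLabels []prompb.Label) (storage.Warnings, error) {
	return StreamChunkedReadResponses(w, queryIndex, ss, extLabels, 1<<20)
}
```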

View file

@@ -115,7 +115,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
        Subsystem:   subsystem,
        Name:        "sent_batch_duration_seconds",
        Help:        "Duration of sample batch send calls to the remote storage.",
-       Buckets:     prometheus.DefBuckets,
+       Buckets:     append(prometheus.DefBuckets, 25, 60, 120, 300),
        ConstLabels: constLabels,
    })
    m.highestSentTimestamp = &maxGauge{

@@ -220,9 +220,9 @@ func (m *queueManagerMetrics) unregister() {
    }
}

-// StorageClient defines an interface for sending a batch of samples to an
+// WriteClient defines an interface for sending a batch of samples to an
// external timeseries database.
-type StorageClient interface {
+type WriteClient interface {
    // Store stores the given samples in the remote storage.
    Store(context.Context, []byte) error
    // Name uniquely identifies the remote storage.

@@ -232,7 +232,7 @@ type StorageClient interface {
}

// QueueManager manages a queue of samples to be sent to the Storage
-// indicated by the provided StorageClient. Implements writeTo interface
+// indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher.
type QueueManager struct {
    // https://golang.org/pkg/sync/atomic/#pkg-note-BUG

@@ -246,7 +246,7 @@ type QueueManager struct {
    watcher *wal.Watcher

    clientMtx   sync.RWMutex
-   storeClient StorageClient
+   storeClient WriteClient

    seriesMtx    sync.Mutex
    seriesLabels map[uint64]labels.Labels

@@ -275,7 +275,7 @@ func NewQueueManager(
    cfg config.QueueConfig,
    externalLabels labels.Labels,
    relabelConfigs []*relabel.Config,
-   client StorageClient,
+   client WriteClient,
    flushDeadline time.Duration,
) *QueueManager {
    if logger == nil {

@@ -364,7 +364,6 @@ func (t *QueueManager) Start() {
    // Register and initialise some metrics.
    t.metrics.register()
    t.metrics.shardCapacity.Set(float64(t.cfg.Capacity))
-   t.metrics.pendingSamples.Set(0)
    t.metrics.maxNumShards.Set(float64(t.cfg.MaxShards))
    t.metrics.minNumShards.Set(float64(t.cfg.MinShards))
    t.metrics.desiredNumShards.Set(float64(t.cfg.MinShards))

@@ -444,13 +443,13 @@ func (t *QueueManager) SeriesReset(index int) {

// SetClient updates the client used by a queue. Used when only client specific
// fields are updated to avoid restarting the queue.
-func (t *QueueManager) SetClient(c StorageClient) {
+func (t *QueueManager) SetClient(c WriteClient) {
    t.clientMtx.Lock()
    t.storeClient = c
    t.clientMtx.Unlock()
}

-func (t *QueueManager) client() StorageClient {
+func (t *QueueManager) client() WriteClient {
    t.clientMtx.RLock()
    defer t.clientMtx.RUnlock()
    return t.storeClient

@@ -672,6 +671,7 @@ type shards struct {
    // Hard shutdown context is used to terminate outgoing HTTP connections
    // after giving them a chance to terminate.
    hardShutdown          context.CancelFunc
+   droppedOnHardShutdown uint32
}

// start the shards; must be called before any call to enqueue.

@@ -679,6 +679,9 @@ func (s *shards) start(n int) {
    s.mtx.Lock()
    defer s.mtx.Unlock()

+   s.qm.metrics.pendingSamples.Set(0)
+   s.qm.metrics.numShards.Set(float64(n))
+
    newQueues := make([]chan sample, n)
    for i := 0; i < n; i++ {
        newQueues[i] = make(chan sample, s.qm.cfg.Capacity)

@@ -691,10 +694,10 @@
    s.softShutdown = make(chan struct{})
    s.running = int32(n)
    s.done = make(chan struct{})
+   atomic.StoreUint32(&s.droppedOnHardShutdown, 0)
    for i := 0; i < n; i++ {
        go s.runShard(hardShutdownCtx, i, newQueues[i])
    }
-   s.qm.metrics.numShards.Set(float64(n))
}

// stop the shards; subsequent call to enqueue will return false.

@@ -719,12 +722,14 @@ func (s *shards) stop() {
    case <-s.done:
        return
    case <-time.After(s.qm.flushDeadline):
-       level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown")
    }

    // Force an unclean shutdown.
    s.hardShutdown()
    <-s.done
+   if dropped := atomic.LoadUint32(&s.droppedOnHardShutdown); dropped > 0 {
+       level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
+   }
}

// enqueue a sample. If we are currently in the process of shutting down or resharding,

@@ -744,6 +749,7 @@ func (s *shards) enqueue(ref uint64, sample sample) bool {
    case <-s.softShutdown:
        return false
    case s.queues[shard] <- sample:
+       s.qm.metrics.pendingSamples.Inc()
        return true
    }
}

@@ -781,6 +787,12 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
    for {
        select {
        case <-ctx.Done():
+           // In this case we drop all samples in the buffer and the queue.
+           // Remove them from pending and mark them as failed.
+           droppedSamples := nPending + len(queue)
+           s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
+           s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
+           atomic.AddUint32(&s.droppedOnHardShutdown, uint32(droppedSamples))
            return

        case sample, ok := <-queue:

@@ -801,7 +813,6 @@
            pendingSamples[nPending].Samples[0].Timestamp = sample.t
            pendingSamples[nPending].Samples[0].Value = sample.v
            nPending++
-           s.qm.metrics.pendingSamples.Inc()

            if nPending >= max {
                s.sendSamples(ctx, pendingSamples, &buf)
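
Anything satisfying the renamed WriteClient interface can back a QueueManager; a minimal sketch (discardWriteClient is illustrative, not part of the change):

```go
package remote

import "context"

// discardWriteClient accepts and drops every batch; useful only to show the
// three methods a WriteClient must provide.
type discardWriteClient struct{}

func (discardWriteClient) Store(context.Context, []byte) error { return nil }
func (discardWriteClient) Name() string                        { return "discard" }
func (discardWriteClient) Endpoint() string                    { return "none" }
```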

View file

@ -51,7 +51,7 @@ func TestSampleDelivery(t *testing.T) {
n := config.DefaultQueueConfig.MaxSamplesPerSend * 2 n := config.DefaultQueueConfig.MaxSamplesPerSend * 2
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
c := NewTestStorageClient() c := NewTestWriteClient()
c.expectSamples(samples[:len(samples)/2], series) c.expectSamples(samples[:len(samples)/2], series)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
@ -81,7 +81,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration // Let's send one less sample than batch size, and wait the timeout duration
n := 9 n := 9
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
c := NewTestStorageClient() c := NewTestWriteClient()
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
cfg.MaxShards = 1 cfg.MaxShards = 1
@ -125,7 +125,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
}) })
} }
c := NewTestStorageClient() c := NewTestWriteClient()
c.expectSamples(samples, series) c.expectSamples(samples, series)
dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder") dir, err := ioutil.TempDir("", "TestSampleDeliveryOrder")
@ -145,7 +145,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
func TestShutdown(t *testing.T) { func TestShutdown(t *testing.T) {
deadline := 1 * time.Second deadline := 1 * time.Second
c := NewTestBlockedStorageClient() c := NewTestBlockedWriteClient()
dir, err := ioutil.TempDir("", "TestShutdown") dir, err := ioutil.TempDir("", "TestShutdown")
testutil.Ok(t, err) testutil.Ok(t, err)
@ -181,7 +181,7 @@ func TestShutdown(t *testing.T) {
} }
func TestSeriesReset(t *testing.T) { func TestSeriesReset(t *testing.T) {
c := NewTestBlockedStorageClient() c := NewTestBlockedWriteClient()
deadline := 5 * time.Second deadline := 5 * time.Second
numSegments := 4 numSegments := 4
numSeries := 25 numSeries := 25
@ -210,7 +210,7 @@ func TestReshard(t *testing.T) {
nSamples := config.DefaultQueueConfig.Capacity * size nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries) samples, series := createTimeseries(nSamples, nSeries)
c := NewTestStorageClient() c := NewTestWriteClient()
c.expectSamples(samples, series) c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
@ -245,7 +245,7 @@ func TestReshard(t *testing.T) {
} }
func TestReshardRaceWithStop(t *testing.T) { func TestReshardRaceWithStop(t *testing.T) {
c := NewTestStorageClient() c := NewTestWriteClient()
var m *QueueManager var m *QueueManager
h := sync.Mutex{} h := sync.Mutex{}
@ -271,7 +271,7 @@ func TestReshardRaceWithStop(t *testing.T) {
func TestReleaseNoninternedString(t *testing.T) { func TestReleaseNoninternedString(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestStorageClient() c := NewTestWriteClient()
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, c, defaultFlushDeadline)
m.Start() m.Start()
@ -319,7 +319,7 @@ func TestShouldReshard(t *testing.T) {
} }
for _, c := range cases { for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestStorageClient() client := NewTestWriteClient()
 	m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), config.DefaultQueueConfig, nil, nil, client, defaultFlushDeadline)
 	m.numShards = c.startingShards
 	m.samplesIn.incr(c.samplesIn)
@@ -367,7 +367,7 @@ func getSeriesNameFromRef(r record.RefSeries) string {
 	return ""
 }

-type TestStorageClient struct {
+type TestWriteClient struct {
 	receivedSamples map[string][]prompb.Sample
 	expectedSamples map[string][]prompb.Sample
 	withWaitGroup   bool
@@ -376,15 +376,15 @@ type TestStorageClient struct {
 	buf []byte
 }

-func NewTestStorageClient() *TestStorageClient {
-	return &TestStorageClient{
+func NewTestWriteClient() *TestWriteClient {
+	return &TestWriteClient{
 		withWaitGroup:   true,
 		receivedSamples: map[string][]prompb.Sample{},
 		expectedSamples: map[string][]prompb.Sample{},
 	}
 }

-func (c *TestStorageClient) expectSamples(ss []record.RefSample, series []record.RefSeries) {
+func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.RefSeries) {
 	if !c.withWaitGroup {
 		return
 	}
@@ -404,7 +404,7 @@ func (c *TestStorageClient) expectSamples(ss []record.RefSample, series []record
 	c.wg.Add(len(ss))
 }

-func (c *TestStorageClient) waitForExpectedSamples(tb testing.TB) {
+func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) {
 	if !c.withWaitGroup {
 		return
 	}
@@ -418,7 +418,7 @@ func (c *TestStorageClient) waitForExpectedSamples(tb testing.TB) {
 	}
 }

-func (c *TestStorageClient) expectSampleCount(numSamples int) {
+func (c *TestWriteClient) expectSampleCount(numSamples int) {
 	if !c.withWaitGroup {
 		return
 	}
@@ -427,14 +427,14 @@ func (c *TestStorageClient) expectSampleCount(numSamples int) {
 	c.wg.Add(numSamples)
 }

-func (c *TestStorageClient) waitForExpectedSampleCount() {
+func (c *TestWriteClient) waitForExpectedSampleCount() {
 	if !c.withWaitGroup {
 		return
 	}
 	c.wg.Wait()
 }

-func (c *TestStorageClient) Store(_ context.Context, req []byte) error {
+func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 	// nil buffers are ok for snappy, ignore cast error.
@@ -472,41 +472,41 @@ func (c *TestStorageClient) Store(_ context.Context, req []byte) error {
 	return nil
 }

-func (c *TestStorageClient) Name() string {
-	return "teststorageclient"
+func (c *TestWriteClient) Name() string {
+	return "testwriteclient"
 }

-func (c *TestStorageClient) Endpoint() string {
+func (c *TestWriteClient) Endpoint() string {
 	return "http://test-remote.com/1234"
 }

-// TestBlockingStorageClient is a queue_manager StorageClient which will block
+// TestBlockingWriteClient is a queue_manager WriteClient which will block
 // on any calls to Store(), until the request's Context is cancelled, at which
 // point the `numCalls` property will contain a count of how many times Store()
 // was called.
-type TestBlockingStorageClient struct {
+type TestBlockingWriteClient struct {
 	numCalls uint64
 }

-func NewTestBlockedStorageClient() *TestBlockingStorageClient {
-	return &TestBlockingStorageClient{}
+func NewTestBlockedWriteClient() *TestBlockingWriteClient {
+	return &TestBlockingWriteClient{}
 }

-func (c *TestBlockingStorageClient) Store(ctx context.Context, _ []byte) error {
+func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte) error {
 	atomic.AddUint64(&c.numCalls, 1)
 	<-ctx.Done()
 	return nil
 }

-func (c *TestBlockingStorageClient) NumCalls() uint64 {
+func (c *TestBlockingWriteClient) NumCalls() uint64 {
 	return atomic.LoadUint64(&c.numCalls)
 }

-func (c *TestBlockingStorageClient) Name() string {
-	return "testblockingstorageclient"
+func (c *TestBlockingWriteClient) Name() string {
+	return "testblockingwriteclient"
 }

-func (c *TestBlockingStorageClient) Endpoint() string {
+func (c *TestBlockingWriteClient) Endpoint() string {
 	return "http://test-remote-blocking.com/1234"
 }
@@ -516,7 +516,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
 	n := config.DefaultQueueConfig.MaxSamplesPerSend * 10
 	samples, series := createTimeseries(n, n)

-	c := NewTestStorageClient()
+	c := NewTestWriteClient()
 	cfg := config.DefaultQueueConfig
 	cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
@@ -568,7 +568,7 @@ func BenchmarkStartup(b *testing.B) {
 	for n := 0; n < b.N; n++ {
 		metrics := newQueueManagerMetrics(nil, "", "")
-		c := NewTestBlockedStorageClient()
+		c := NewTestBlockedWriteClient()
 		m := NewQueueManager(metrics, nil, nil, logger, dir,
 			newEWMARate(ewmaWeight, shardUpdateDuration),
 			config.DefaultQueueConfig, nil, nil, c, 1*time.Minute)
@@ -611,7 +611,7 @@ func TestProcessExternalLabels(t *testing.T) {
 }

 func TestCalculateDesiredShards(t *testing.T) {
-	c := NewTestStorageClient()
+	c := NewTestWriteClient()
 	cfg := config.DefaultQueueConfig
 	dir, err := ioutil.TempDir("", "TestCalculateDesiredShards")
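For orientation, the renamed test doubles above all satisfy the same three-method client contract used by the remote-write queue manager. A minimal standalone sketch of that contract and a counting fake, with illustrative names inferred only from the methods visible in this diff:

package main

import (
    "context"
    "fmt"
    "sync/atomic"
)

// WriteClient mirrors the three methods the test doubles implement.
type WriteClient interface {
    Store(ctx context.Context, req []byte) error
    Name() string
    Endpoint() string
}

// countingClient counts Store calls, in the spirit of TestBlockingWriteClient.
type countingClient struct{ numCalls uint64 }

func (c *countingClient) Store(_ context.Context, _ []byte) error {
    atomic.AddUint64(&c.numCalls, 1)
    return nil
}
func (c *countingClient) Name() string     { return "countingclient" }
func (c *countingClient) Endpoint() string { return "http://example.invalid/1234" }

func main() {
    var c WriteClient = &countingClient{}
    _ = c.Store(context.Background(), nil)
    fmt.Println(c.Name(), c.Endpoint())
}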

View file

@@ -15,193 +15,162 @@ package remote

 import (
 	"context"
-	"fmt"

-	"github.com/prometheus/client_golang/prometheus"
+	"github.com/pkg/errors"

 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage"
 )

-var remoteReadQueries = prometheus.NewGaugeVec(
-	prometheus.GaugeOpts{
-		Namespace: namespace,
-		Subsystem: subsystem,
-		Name:      "remote_read_queries",
-		Help:      "The number of in-flight remote read queries.",
-	},
-	[]string{remoteName, endpoint},
-)
-
-var remoteReadQueriesTotal = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Namespace: namespace,
-		Subsystem: subsystem,
-		Name:      "remote_read_queries_total",
-		Help:      "The total number of remote read queries.",
-	},
-	[]string{remoteName, endpoint},
-)
-
-func init() {
-	prometheus.MustRegister(remoteReadQueries)
-	prometheus.MustRegister(remoteReadQueriesTotal)
-}
-
-// QueryableClient returns a storage.Queryable which queries the given
-// Client to select series sets.
-func QueryableClient(c *Client) storage.Queryable {
-	remoteReadQueries.WithLabelValues(c.remoteName, c.url.String())
-	remoteReadQueriesTotal.WithLabelValues(c.remoteName, c.url.String())
-
-	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-		return &querier{
-			ctx:    ctx,
-			mint:   mint,
-			maxt:   maxt,
-			client: c,
-		}, nil
-	})
-}
-
-// querier is an adapter to make a Client usable as a storage.Querier.
-type querier struct {
-	ctx        context.Context
-	mint, maxt int64
-	client     *Client
-}
-
-// Select implements storage.Querier and uses the given matchers to read series sets from the Client.
-func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	query, err := ToQuery(q.mint, q.maxt, matchers, hints)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	remoteReadGauge := remoteReadQueries.WithLabelValues(q.client.remoteName, q.client.url.String())
-	remoteReadGauge.Inc()
-	defer remoteReadGauge.Dec()
-
-	remoteReadTotalCounter := remoteReadQueriesTotal.WithLabelValues(q.client.remoteName, q.client.url.String())
-	remoteReadTotalCounter.Inc()
-
-	res, err := q.client.Read(q.ctx, query)
-	if err != nil {
-		return nil, nil, fmt.Errorf("remote_read: %v", err)
-	}
-	return FromQueryResult(sortSeries, res), nil, nil
-}
-
-// LabelValues implements storage.Querier and is a noop.
-func (q *querier) LabelValues(string) ([]string, storage.Warnings, error) {
-	// TODO implement?
-	return nil, nil, nil
-}
-
-// LabelNames implements storage.Querier and is a noop.
-func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
-	// TODO implement?
-	return nil, nil, nil
-}
-
-// Close implements storage.Querier and is a noop.
-func (q *querier) Close() error {
-	return nil
-}
-
-// ExternalLabelsHandler returns a storage.Queryable which creates a
-// externalLabelsQuerier.
-func ExternalLabelsHandler(next storage.Queryable, externalLabels labels.Labels) storage.Queryable {
-	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-		q, err := next.Querier(ctx, mint, maxt)
-		if err != nil {
-			return nil, err
-		}
-		return &externalLabelsQuerier{Querier: q, externalLabels: externalLabels}, nil
-	})
-}
-
-// externalLabelsQuerier is a querier which ensures that Select() results match
-// the configured external labels.
-type externalLabelsQuerier struct {
-	storage.Querier
-
-	externalLabels labels.Labels
-}
-
-// Select adds equality matchers for all external labels to the list of matchers
-// before calling the wrapped storage.Queryable. The added external labels are
-// removed from the returned series sets.
-func (q externalLabelsQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	m, added := q.addExternalLabels(matchers)
-	s, warnings, err := q.Querier.Select(sortSeries, hints, m...)
-	if err != nil {
-		return nil, warnings, err
-	}
-	return newSeriesSetFilter(s, added), warnings, nil
-}
-
-// PreferLocalStorageFilter returns a QueryableFunc which creates a NoopQuerier
-// if requested timeframe can be answered completely by the local TSDB, and
-// reduces maxt if the timeframe can be partially answered by TSDB.
-func PreferLocalStorageFilter(next storage.Queryable, cb startTimeCallback) storage.Queryable {
-	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-		localStartTime, err := cb()
-		if err != nil {
-			return nil, err
-		}
-		cmaxt := maxt
-		// Avoid queries whose time range is later than the first timestamp in local DB.
-		if mint > localStartTime {
-			return storage.NoopQuerier(), nil
-		}
-		// Query only samples older than the first timestamp in local DB.
-		if maxt > localStartTime {
-			cmaxt = localStartTime
-		}
-		return next.Querier(ctx, mint, cmaxt)
-	})
-}
-
-// RequiredMatchersFilter returns a storage.Queryable which creates a
-// requiredMatchersQuerier.
-func RequiredMatchersFilter(next storage.Queryable, required []*labels.Matcher) storage.Queryable {
-	return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-		q, err := next.Querier(ctx, mint, maxt)
-		if err != nil {
-			return nil, err
-		}
-		return &requiredMatchersQuerier{Querier: q, requiredMatchers: required}, nil
-	})
-}
-
-// requiredMatchersQuerier wraps a storage.Querier and requires Select() calls
-// to match the given labelSet.
-type requiredMatchersQuerier struct {
-	storage.Querier
-
-	requiredMatchers []*labels.Matcher
-}
-
-// Select returns a NoopSeriesSet if the given matchers don't match the label
-// set of the requiredMatchersQuerier. Otherwise it'll call the wrapped querier.
-func (q requiredMatchersQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	ms := q.requiredMatchers
-	for _, m := range matchers {
-		for i, r := range ms {
-			if m.Type == labels.MatchEqual && m.Name == r.Name && m.Value == r.Value {
-				ms = append(ms[:i], ms[i+1:]...)
-				break
-			}
-		}
-		if len(ms) == 0 {
-			break
-		}
-	}
-	if len(ms) > 0 {
-		return storage.NoopSeriesSet(), nil, nil
-	}
-	return q.Querier.Select(sortSeries, hints, matchers...)
-}
+type sampleAndChunkQueryableClient struct {
+	client           ReadClient
+	externalLabels   labels.Labels
+	requiredMatchers []*labels.Matcher
+	readRecent       bool
+	callback         startTimeCallback
+}
+
+// NewSampleAndChunkQueryableClient returns a storage.SampleAndChunkQueryable which queries the given client to select series sets.
+func NewSampleAndChunkQueryableClient(
+	c ReadClient,
+	externalLabels labels.Labels,
+	requiredMatchers []*labels.Matcher,
+	readRecent bool,
+	callback startTimeCallback,
+) storage.SampleAndChunkQueryable {
+	return &sampleAndChunkQueryableClient{
+		client:           c,
+		externalLabels:   externalLabels,
+		requiredMatchers: requiredMatchers,
+		readRecent:       readRecent,
+		callback:         callback,
+	}
+}
+
+func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	q := &querier{
+		ctx:              ctx,
+		mint:             mint,
+		maxt:             maxt,
+		client:           c.client,
+		externalLabels:   c.externalLabels,
+		requiredMatchers: c.requiredMatchers,
+	}
+	if c.readRecent {
+		return q, nil
+	}
+
+	var (
+		noop bool
+		err  error
+	)
+	q.maxt, noop, err = c.preferLocalStorage(mint, maxt)
+	if err != nil {
+		return nil, err
+	}
+	if noop {
+		return storage.NoopQuerier(), nil
+	}
+	return q, nil
+}
+
+func (c *sampleAndChunkQueryableClient) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+	cq := &chunkQuerier{
+		querier: querier{
+			ctx:              ctx,
+			mint:             mint,
+			maxt:             maxt,
+			client:           c.client,
+			externalLabels:   c.externalLabels,
+			requiredMatchers: c.requiredMatchers,
+		},
+	}
+	if c.readRecent {
+		return cq, nil
+	}
+
+	var (
+		noop bool
+		err  error
+	)
+	cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt)
+	if err != nil {
+		return nil, err
+	}
+	if noop {
+		return storage.NoopChunkedQuerier(), nil
+	}
+	return cq, nil
+}
+
+// preferLocalStorage returns noop if requested timeframe can be answered completely by the local TSDB, and
+// reduces maxt if the timeframe can be partially answered by TSDB.
+func (c *sampleAndChunkQueryableClient) preferLocalStorage(mint, maxt int64) (cmaxt int64, noop bool, err error) {
+	localStartTime, err := c.callback()
+	if err != nil {
+		return 0, false, err
+	}
+	cmaxt = maxt
+
+	// Avoid queries whose time range is later than the first timestamp in local DB.
+	if mint > localStartTime {
+		return 0, true, nil
+	}
+	// Query only samples older than the first timestamp in local DB.
+	if maxt > localStartTime {
+		cmaxt = localStartTime
+	}
+	return cmaxt, false, nil
+}
+
+type querier struct {
+	ctx        context.Context
+	mint, maxt int64
+	client     ReadClient
+
+	// Derived from configuration.
+	externalLabels   labels.Labels
+	requiredMatchers []*labels.Matcher
+}
+
+// Select implements storage.Querier and uses the given matchers to read series sets from the client.
+// Select also adds equality matchers for all external labels to the list of matchers before calling remote endpoint.
+// The added external labels are removed from the returned series sets.
+//
+// If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the
+// requiredMatchers. Otherwise it'll just call remote endpoint.
+func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	if len(q.requiredMatchers) > 0 {
+		// Copy to not modify slice configured by user.
+		requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...)
+		for _, m := range matchers {
+			for i, r := range requiredMatchers {
+				if m.Type == labels.MatchEqual && m.Name == r.Name && m.Value == r.Value {
+					// Requirement matched.
+					requiredMatchers = append(requiredMatchers[:i], requiredMatchers[i+1:]...)
+					break
+				}
+			}
+			if len(requiredMatchers) == 0 {
+				break
+			}
+		}
+		if len(requiredMatchers) > 0 {
+			return storage.NoopSeriesSet()
+		}
+	}
+
+	m, added := q.addExternalLabels(matchers)
+	query, err := ToQuery(q.mint, q.maxt, m, hints)
+	if err != nil {
+		return storage.ErrSeriesSet(errors.Wrap(err, "toQuery"))
+	}
+
+	res, err := q.client.Read(q.ctx, query)
+	if err != nil {
+		return storage.ErrSeriesSet(errors.Wrap(err, "remote_read"))
+	}
+	return newSeriesSetFilter(FromQueryResult(sortSeries, res), added)
+}

 // addExternalLabels adds matchers for each external label. External labels
@@ -210,7 +179,7 @@ func (q requiredMatchersQuerier) Select(sortSeries bool, hints *storage.SelectHi
 // We return the new set of matchers, along with a map of labels for which
 // matchers were added, so that these can later be removed from the result
 // time series again.
-func (q externalLabelsQuerier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) {
+func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) {
 	el := make(labels.Labels, len(q.externalLabels))
 	copy(el, q.externalLabels)
@@ -235,6 +204,35 @@ func (q externalLabelsQuerier) addExternalLabels(ms []*labels.Matcher) ([]*label
 	return ms, el
 }

+// LabelValues implements storage.Querier and is a noop.
+func (q *querier) LabelValues(string) ([]string, storage.Warnings, error) {
+	// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
+	return nil, nil, errors.New("not implemented")
+}
+
+// LabelNames implements storage.Querier and is a noop.
+func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
+	// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
+	return nil, nil, errors.New("not implemented")
+}
+
+// Close implements storage.Querier and is a noop.
+func (q *querier) Close() error {
+	return nil
+}
+
+// chunkQuerier is an adapter to make a client usable as a storage.ChunkQuerier.
+type chunkQuerier struct {
+	querier
+}
+
+// Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client.
+// It uses remote.querier.Select so it supports external labels and required matchers if specified.
+func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
+	// TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket).
+	return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...))
+}
+
 func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.SeriesSet {
 	return &seriesSetFilter{
 		SeriesSet: ss,
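The preferLocalStorage logic above is the heart of read_recent handling. A self-contained sketch of just the clamping rule (illustrative names, not the package API); the three inputs mirror the cases the old PreferLocalStorageFilter test covered:

package main

import "fmt"

func preferLocal(localStartTime, mint, maxt int64) (cmaxt int64, noop bool) {
    if mint > localStartTime {
        return 0, true // Local TSDB can answer the whole range: skip remote read.
    }
    if maxt > localStartTime {
        maxt = localStartTime // Query only what the local TSDB does not have.
    }
    return maxt, false
}

func main() {
    fmt.Println(preferLocal(100, 0, 50)) // 50 false: range is fully before local data.
    fmt.Println(preferLocal(20, 0, 50))  // 20 false: maxt clamped to local start.
    fmt.Println(preferLocal(20, 30, 50)) // 0 true: local TSDB covers it, noop.
}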

View file

@@ -22,6 +22,7 @@ import (
 	"sort"
 	"testing"

+	"github.com/pkg/errors"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/pkg/labels"
@@ -40,7 +41,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
 		URL: &config_util.URL{
 			URL: &url.URL{
 				Scheme: "http",
-				Host:   "localhost",
+				Host:   "localhost1",
 			},
 		},
 	}
@@ -49,7 +50,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
 		URL: &config_util.URL{
 			URL: &url.URL{
 				Scheme: "http",
-				Host:   "localhost",
+				Host:   "localhost2",
 			},
 		},
 	}
@@ -57,7 +58,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
 		URL: &config_util.URL{
 			URL: &url.URL{
 				Scheme: "http",
-				Host:   "localhost",
+				Host:   "localhost3",
 			},
 		},
 	}
@@ -92,6 +93,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
 	}

 	for _, tc := range cases {
+		t.Run("", func(t *testing.T) {
 			s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline)
 			conf := &config.Config{
 				GlobalConfig: config.DefaultGlobalConfig,
@@ -100,29 +102,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
 			err := s.ApplyConfig(conf)
 			gotError := err != nil
 			testutil.Equals(t, tc.err, gotError)
-
-		err = s.Close()
-		testutil.Ok(t, err)
-	}
-}
-
-func TestExternalLabelsQuerierSelect(t *testing.T) {
-	matchers := []*labels.Matcher{
-		labels.MustNewMatcher(labels.MatchEqual, "job", "api-server"),
-	}
-	q := &externalLabelsQuerier{
-		Querier: mockQuerier{},
-		externalLabels: labels.Labels{
-			{Name: "region", Value: "europe"},
-		},
-	}
-
-	want := newSeriesSetFilter(mockSeriesSet{}, q.externalLabels)
-	have, _, err := q.Select(false, nil, matchers...)
-	if err != nil {
-		t.Error(err)
-	}
-	if !reflect.DeepEqual(want, have) {
-		t.Errorf("expected series set %+v, got %+v", want, have)
-	}
+			testutil.Ok(t, s.Close())
+		})
 	}
 }
@@ -181,7 +162,7 @@ func TestExternalLabelsQuerierAddExternalLabels(t *testing.T) {
 	}

 	for i, test := range tests {
-		q := &externalLabelsQuerier{Querier: mockQuerier{}, externalLabels: test.el}
+		q := &querier{externalLabels: test.el}
 		matchers, added := q.addExternalLabels(test.inMatchers)

 		sort.Slice(test.outMatchers, func(i, j int) bool { return test.outMatchers[i].Name < test.outMatchers[j].Name })
@@ -218,195 +199,316 @@ func TestSeriesSetFilter(t *testing.T) {
		},
	}

	for _, tc := range tests {
		filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove)
		act, ws, err := ToQueryResult(filtered, 1e6)
		testutil.Ok(t, err)
		testutil.Equals(t, 0, len(ws))
		testutil.Equals(t, tc.expected, act)
	}
}

-	for i, tc := range tests {
-		filtered := newSeriesSetFilter(FromQueryResult(true, tc.in), tc.toRemove)
-		have, err := ToQueryResult(filtered, 1e6)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !reflect.DeepEqual(have, tc.expected) {
-			t.Fatalf("%d. unexpected labels; want %v, got %v", i, tc.expected, have)
-		}
-	}
-}
-
-type mockQuerier struct {
-	ctx        context.Context
-	mint, maxt int64
-
-	storage.Querier
-}
-
-type mockSeriesSet struct {
-	storage.SeriesSet
-}
-
-func (mockQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	return mockSeriesSet{}, nil, nil
-}
-
-func TestPreferLocalStorageFilter(t *testing.T) {
-	ctx := context.Background()
-
-	tests := []struct {
-		localStartTime int64
-		mint           int64
-		maxt           int64
-		querier        storage.Querier
-	}{
-		{
-			localStartTime: int64(100),
-			mint:           int64(0),
-			maxt:           int64(50),
-			querier:        mockQuerier{ctx: ctx, mint: 0, maxt: 50},
-		},
-		{
-			localStartTime: int64(20),
-			mint:           int64(0),
-			maxt:           int64(50),
-			querier:        mockQuerier{ctx: ctx, mint: 0, maxt: 20},
-		},
-		{
-			localStartTime: int64(20),
-			mint:           int64(30),
-			maxt:           int64(50),
-			querier:        storage.NoopQuerier(),
-		},
-	}
-
-	for i, test := range tests {
-		f := PreferLocalStorageFilter(
-			storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-				return mockQuerier{ctx: ctx, mint: mint, maxt: maxt}, nil
-			}),
-			func() (int64, error) { return test.localStartTime, nil },
-		)
-
-		q, err := f.Querier(ctx, test.mint, test.maxt)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if test.querier != q {
-			t.Errorf("%d. expected querier %+v, got %+v", i, test.querier, q)
-		}
-	}
-}
-
-func TestRequiredMatchersFilter(t *testing.T) {
-	ctx := context.Background()
-
-	f := RequiredMatchersFilter(
-		storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-			return mockQuerier{ctx: ctx, mint: mint, maxt: maxt}, nil
-		}),
-		[]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "special", "label")},
-	)
-
-	want := &requiredMatchersQuerier{
-		Querier:          mockQuerier{ctx: ctx, mint: 0, maxt: 50},
-		requiredMatchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "special", "label")},
-	}
-	have, err := f.Querier(ctx, 0, 50)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !reflect.DeepEqual(want, have) {
-		t.Errorf("expected querier %+v, got %+v", want, have)
-	}
-}
-
-func TestRequiredLabelsQuerierSelect(t *testing.T) {
-	tests := []struct {
-		requiredMatchers []*labels.Matcher
-		matchers         []*labels.Matcher
-		seriesSet        storage.SeriesSet
-	}{
-		{
-			requiredMatchers: []*labels.Matcher{},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			seriesSet: mockSeriesSet{},
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			seriesSet: mockSeriesSet{},
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchRegexp, "special", "label"),
-			},
-			seriesSet: storage.NoopSeriesSet(),
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "different"),
-			},
-			seriesSet: storage.NoopSeriesSet(),
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-				labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
-			},
-			seriesSet: mockSeriesSet{},
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-				labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-				labels.MustNewMatcher(labels.MatchEqual, "foo", "baz"),
-			},
-			seriesSet: storage.NoopSeriesSet(),
-		},
-		{
-			requiredMatchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-				labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
-			},
-			matchers: []*labels.Matcher{
-				labels.MustNewMatcher(labels.MatchEqual, "special", "label"),
-				labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
-			},
-			seriesSet: mockSeriesSet{},
-		},
-	}
-
-	for i, test := range tests {
-		q := &requiredMatchersQuerier{
-			Querier:          mockQuerier{},
-			requiredMatchers: test.requiredMatchers,
-		}
-
-		have, _, err := q.Select(false, nil, test.matchers...)
-		if err != nil {
-			t.Error(err)
-		}
-		if want := test.seriesSet; want != have {
-			t.Errorf("%d. expected series set %+v, got %+v", i, want, have)
-		}
-		if want, have := test.requiredMatchers, q.requiredMatchers; !reflect.DeepEqual(want, have) {
-			t.Errorf("%d. requiredMatchersQuerier.Select() has modified the matchers", i)
-		}
-	}
-}

type mockedRemoteClient struct {
	got   *prompb.Query
	store []*prompb.TimeSeries
}

func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
	if c.got != nil {
		return nil, errors.Errorf("expected only one call to remote client got: %v", query)
	}
	c.got = query

	matchers, err := FromLabelMatchers(query.Matchers)
	if err != nil {
		return nil, err
	}

	q := &prompb.QueryResult{}
	for _, s := range c.store {
		l := labelProtosToLabels(s.Labels)
		var notMatch bool

		for _, m := range matchers {
			if v := l.Get(m.Name); v != "" {
				if !m.Matches(v) {
					notMatch = true
					break
				}
			}
		}

		if !notMatch {
			q.Timeseries = append(q.Timeseries, &prompb.TimeSeries{Labels: s.Labels})
		}
	}
	return q, nil
}

func (c *mockedRemoteClient) reset() {
	c.got = nil
}

// NOTE: We don't need to test ChunkQuerier as it uses querier for all operations anyway.
func TestSampleAndChunkQueryableClient(t *testing.T) {
	m := &mockedRemoteClient{
		// Samples do not matter for the tests below.
		store: []*prompb.TimeSeries{
			{Labels: []prompb.Label{{Name: "a", Value: "b"}}},
			{Labels: []prompb.Label{{Name: "a", Value: "b3"}, {Name: "region", Value: "us"}}},
			{Labels: []prompb.Label{{Name: "a", Value: "b2"}, {Name: "region", Value: "europe"}}},
		},
	}

	for _, tc := range []struct {
		name             string
		matchers         []*labels.Matcher
		mint, maxt       int64
		externalLabels   labels.Labels
		requiredMatchers []*labels.Matcher
		readRecent       bool
		callback         startTimeCallback

		expectedQuery  *prompb.Query
		expectedSeries []labels.Labels
	}{
		{
			name: "empty",
			mint: 1, maxt: 2,
			matchers: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"),
			},
			readRecent: true,

			expectedQuery: &prompb.Query{
				StartTimestampMs: 1,
				EndTimestampMs:   2,
				Matchers: []*prompb.LabelMatcher{
					{Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"},
				},
			},
			expectedSeries: []labels.Labels{
				labels.FromStrings("a", "b"),
				labels.FromStrings("a", "b2", "region", "europe"),
				labels.FromStrings("a", "b3", "region", "us"),
			},
		},
		{
			name: "external labels specified, not explicitly requested",
			mint: 1, maxt: 2,
			matchers: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"),
			},
			readRecent: true,
			externalLabels: labels.Labels{
				{Name: "region", Value: "europe"},
			},

			expectedQuery: &prompb.Query{
				StartTimestampMs: 1,
				EndTimestampMs:   2,
				Matchers: []*prompb.LabelMatcher{
					{Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"},
					{Type: prompb.LabelMatcher_EQ, Name: "region", Value: "europe"},
				},
			},
			expectedSeries: []labels.Labels{
labels.FromStrings("a", "b"),
labels.FromStrings("a", "b2"),
},
},
{
name: "external labels specified, explicitly requested europe",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"),
labels.MustNewMatcher(labels.MatchEqual, "region", "europe"),
},
readRecent: true,
externalLabels: labels.Labels{
{Name: "region", Value: "europe"},
},
expectedQuery: &prompb.Query{
StartTimestampMs: 1,
EndTimestampMs: 2,
Matchers: []*prompb.LabelMatcher{
{Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"},
{Type: prompb.LabelMatcher_EQ, Name: "region", Value: "europe"},
},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b"),
labels.FromStrings("a", "b2", "region", "europe"),
},
},
{
name: "external labels specified, explicitly requested not europe",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"),
labels.MustNewMatcher(labels.MatchEqual, "region", "us"),
},
readRecent: true,
externalLabels: labels.Labels{
{Name: "region", Value: "europe"},
},
expectedQuery: &prompb.Query{
StartTimestampMs: 1,
EndTimestampMs: 2,
Matchers: []*prompb.LabelMatcher{
{Type: prompb.LabelMatcher_NEQ, Name: "a", Value: "something"},
{Type: prompb.LabelMatcher_EQ, Name: "region", Value: "us"},
},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b"),
labels.FromStrings("a", "b3", "region", "us"),
},
},
{
name: "prefer local storage",
mint: 0, maxt: 50,
callback: func() (i int64, err error) { return 100, nil },
readRecent: false,
expectedQuery: &prompb.Query{
StartTimestampMs: 0,
EndTimestampMs: 50,
Matchers: []*prompb.LabelMatcher{},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b"),
labels.FromStrings("a", "b2", "region", "europe"),
labels.FromStrings("a", "b3", "region", "us"),
},
},
{
name: "prefer local storage, limited time",
mint: 0, maxt: 50,
callback: func() (i int64, err error) { return 20, nil },
readRecent: false,
expectedQuery: &prompb.Query{
StartTimestampMs: 0,
EndTimestampMs: 20,
Matchers: []*prompb.LabelMatcher{},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b"),
labels.FromStrings("a", "b2", "region", "europe"),
labels.FromStrings("a", "b3", "region", "us"),
},
},
{
name: "prefer local storage, skipped",
mint: 30, maxt: 50,
callback: func() (i int64, err error) { return 20, nil },
readRecent: false,
expectedQuery: nil,
expectedSeries: nil, // Noop should be used.
},
{
name: "required matcher specified, user also specifies same",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
readRecent: true,
requiredMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
expectedQuery: &prompb.Query{
StartTimestampMs: 1,
EndTimestampMs: 2,
Matchers: []*prompb.LabelMatcher{
{Type: prompb.LabelMatcher_EQ, Name: "a", Value: "b2"},
},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b2", "region", "europe"),
},
},
{
name: "required matcher specified",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
readRecent: true,
requiredMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
expectedQuery: &prompb.Query{
StartTimestampMs: 1,
EndTimestampMs: 2,
Matchers: []*prompb.LabelMatcher{
{Type: prompb.LabelMatcher_EQ, Name: "a", Value: "b2"},
},
},
expectedSeries: []labels.Labels{
labels.FromStrings("a", "b2", "region", "europe"),
},
},
{
name: "required matcher specified, given matcher does not match",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchNotEqual, "a", "something"),
},
readRecent: true,
requiredMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
expectedQuery: nil,
expectedSeries: nil, // Given matchers do not match the required ones; noop expected.
},
{
name: "required matcher specified, given matcher does not match2",
mint: 1, maxt: 2,
matchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchNotEqual, "x", "something"),
},
readRecent: true,
requiredMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, "a", "b2"),
},
expectedQuery: nil,
expectedSeries: nil, // Given matchers do not match the required ones; noop expected.
},
} {
t.Run(tc.name, func(t *testing.T) {
m.reset()
c := NewSampleAndChunkQueryableClient(
m,
tc.externalLabels,
tc.requiredMatchers,
tc.readRecent,
tc.callback,
)
q, err := c.Querier(context.TODO(), tc.mint, tc.maxt)
testutil.Ok(t, err)
defer testutil.Ok(t, q.Close())
ss := q.Select(true, nil, tc.matchers...)
testutil.Ok(t, err)
testutil.Equals(t, storage.Warnings(nil), ss.Warnings())
testutil.Equals(t, tc.expectedQuery, m.got)
var got []labels.Labels
for ss.Next() {
got = append(got, ss.At().Labels())
} }
testutil.Ok(t, ss.Err())
testutil.Equals(t, tc.expectedSeries, got)
})
} }
} }

View file

@@ -51,8 +51,8 @@ type Storage struct {
 	rws *WriteStorage

-	// For reads
-	queryables             []storage.Queryable
+	// For reads.
+	queryables             []storage.SampleAndChunkQueryable
 	localStartTimeCallback startTimeCallback
 }
@@ -61,6 +61,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
 	if l == nil {
 		l = log.NewNopLogger()
 	}
+
 	s := &Storage{
 		logger:                 logging.Dedupe(l, 1*time.Minute),
 		localStartTimeCallback: stCallback,
@@ -80,7 +81,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 	// Update read clients
 	readHashes := make(map[string]struct{})
-	queryables := make([]storage.Queryable, 0, len(conf.RemoteReadConfigs))
+	queryables := make([]storage.SampleAndChunkQueryable, 0, len(conf.RemoteReadConfigs))
 	for _, rrConf := range conf.RemoteReadConfigs {
 		hash, err := toHash(rrConf)
 		if err != nil {
@@ -96,12 +97,12 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 		// Set the queue name to the config hash if the user has not set
 		// a name in their remote write config so we can still differentiate
 		// between queues that have the same remote write endpoint.
-		name := string(hash[:6])
+		name := hash[:6]
 		if rrConf.Name != "" {
 			name = rrConf.Name
 		}

-		c, err := NewClient(name, &ClientConfig{
+		c, err := newReadClient(name, &ClientConfig{
 			URL:              rrConf.URL,
 			Timeout:          rrConf.RemoteTimeout,
 			HTTPClientConfig: rrConf.HTTPClientConfig,
@@ -110,15 +111,13 @@ func (s *Storage) ApplyConfig(conf *config.Config) error {
 			return err
 		}

-		q := QueryableClient(c)
-		q = ExternalLabelsHandler(q, conf.GlobalConfig.ExternalLabels)
-		if len(rrConf.RequiredMatchers) > 0 {
-			q = RequiredMatchersFilter(q, labelsToEqualityMatchers(rrConf.RequiredMatchers))
-		}
-		if !rrConf.ReadRecent {
-			q = PreferLocalStorageFilter(q, s.localStartTimeCallback)
-		}
-		queryables = append(queryables, q)
+		queryables = append(queryables, NewSampleAndChunkQueryableClient(
+			c,
+			conf.GlobalConfig.ExternalLabels,
+			labelsToEqualityMatchers(rrConf.RequiredMatchers),
+			rrConf.ReadRecent,
+			s.localStartTimeCallback,
+		))
 	}
 	s.queryables = queryables
@@ -132,6 +131,9 @@ func (s *Storage) StartTime() (int64, error) {
 // Querier returns a storage.MergeQuerier combining the remote client queriers
 // of each configured remote read endpoint.
+// The returned querier will never return an error, as all queryables are assumed best effort.
+// Additionally, all returned queriers ensure that their Select's SeriesSets have ready data after the first `Next` invocation.
+// This is because Prometheus (fanout and secondary queries) can't, by design, handle a stream failing half way through.
 func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
 	s.mtx.Lock()
 	queryables := s.queryables
@@ -145,7 +147,25 @@ func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querie
 		}
 		queriers = append(queriers, q)
 	}
-	return storage.NewMergeQuerier(nil, queriers, storage.ChainedSeriesMerge), nil
+	return storage.NewMergeQuerier(storage.NoopQuerier(), queriers, storage.ChainedSeriesMerge), nil
+}
+
+// ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers
+// of each configured remote read endpoint.
+func (s *Storage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+	s.mtx.Lock()
+	queryables := s.queryables
+	s.mtx.Unlock()
+
+	queriers := make([]storage.ChunkQuerier, 0, len(queryables))
+	for _, queryable := range queryables {
+		q, err := queryable.ChunkQuerier(ctx, mint, maxt)
+		if err != nil {
+			return nil, err
+		}
+		queriers = append(queriers, q)
+	}
+	return storage.NewMergeChunkQuerier(storage.NoopChunkedQuerier(), queriers, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil
 }

 // Appender implements storage.Storage.

View file

@@ -113,12 +113,12 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 		// Set the queue name to the config hash if the user has not set
 		// a name in their remote write config so we can still differentiate
 		// between queues that have the same remote write endpoint.
-		name := string(hash[:6])
+		name := hash[:6]
 		if rwConf.Name != "" {
 			name = rwConf.Name
 		}

-		c, err := NewClient(name, &ClientConfig{
+		c, err := NewWriteClient(name, &ClientConfig{
 			URL:              rwConf.URL,
 			Timeout:          rwConf.RemoteTimeout,
 			HTTPClientConfig: rwConf.HTTPClientConfig,

storage/secondary.go Normal file
View file

@@ -0,0 +1,112 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"sync"
"github.com/prometheus/prometheus/pkg/labels"
)
// secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner.
// This means that an error on any method returned by Querier except Close will be returned as a warning,
// and the result will be empty.
//
// Additionally, Querier ensures that if ANY SeriesSet returned by this querier's Select fails on an initial Next,
// all other SeriesSets will return no response as well. This ensures a consistent partial-response strategy, where you
// have either full results or none from each secondary Querier.
// NOTE: This works well only for implementations that fail during the first Next() only (e.g. a fetch from the network).
// If an implementation fails during further iterations, the set will panic. If Select is invoked after the first Next of
// any returned SeriesSet, the querier will panic.
//
// Not go-routine safe.
// NOTE: Prometheus treats all remote storages as secondary / best effort.
type secondaryQuerier struct {
genericQuerier
once sync.Once
done bool
asyncSets []genericSeriesSet
}
func newSecondaryQuerierFrom(q Querier) genericQuerier {
return &secondaryQuerier{genericQuerier: newGenericQuerierFrom(q)}
}
func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}
func (s *secondaryQuerier) LabelValues(name string) ([]string, Warnings, error) {
vals, w, err := s.genericQuerier.LabelValues(name)
if err != nil {
return nil, append([]error{err}, w...), nil
}
return vals, w, nil
}
func (s *secondaryQuerier) LabelNames() ([]string, Warnings, error) {
names, w, err := s.genericQuerier.LabelNames()
if err != nil {
return nil, append([]error{err}, w...), nil
}
return names, w, nil
}
func (s *secondaryQuerier) createFn(asyncSet genericSeriesSet) func() (genericSeriesSet, bool) {
s.asyncSets = append(s.asyncSets, asyncSet)
curr := len(s.asyncSets) - 1
return func() (genericSeriesSet, bool) {
s.once.Do(func() {
// On the first create invocation we iterate over all sets and ensure their Next() returns some value without
// errors. This is to ensure we support consistent partial failures.
for i, set := range s.asyncSets {
if set.Next() {
continue
}
ws := set.Warnings()
// Failed set.
if err := set.Err(); err != nil {
ws = append([]error{err}, ws...)
// Promote the warnings to the current one.
s.asyncSets[curr] = warningsOnlySeriesSet(ws)
// One of the sets failed: ensure the rest of the sets return nothing. (All-or-nothing logic.)
for i := range s.asyncSets {
if curr != i {
s.asyncSets[i] = noopGenericSeriesSet{}
}
}
break
}
// Exhausted set.
s.asyncSets[i] = warningsOnlySeriesSet(ws)
}
s.done = true
})
switch s.asyncSets[curr].(type) {
case warningsOnlySeriesSet, noopGenericSeriesSet:
return s.asyncSets[curr], false
default:
return s.asyncSets[curr], true
}
}
}
func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
if s.done {
panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done")
}
return &lazySeriesSet{create: s.createFn(s.genericQuerier.Select(sortSeries, hints, matchers...))}
}
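A rough standalone sketch of the all-or-nothing promotion createFn performs on the first Next above: if any secondary set fails its initial Next, every set is demoted so the caller sees either full results or warnings only. Simplified types, not the package's internal generic series sets:

package main

import (
    "errors"
    "fmt"
)

// result is what each secondary series set yields after the initial probe.
type result struct {
    ok       bool    // true if the set keeps its data
    warnings []error // errors demoted to warnings
}

// probe imitates secondaryQuerier's once-guarded first-Next check: if any
// set fails its first Next, all sets are demoted (all or nothing).
func probe(firstNextErrs []error) []result {
    out := make([]result, len(firstNextErrs))
    for i, err := range firstNextErrs {
        if err != nil {
            for j := range out {
                out[j] = result{} // every set returns nothing
            }
            out[i].warnings = []error{err} // the failure surfaces as a warning
            return out
        }
        out[i] = result{ok: true}
    }
    return out
}

func main() {
    fmt.Println(probe([]error{nil, nil}))                        // both sets keep their data
    fmt.Println(probe([]error{nil, errors.New("fetch failed")})) // no set returns data
}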

View file

@@ -14,6 +14,7 @@
 package storage

 import (
+	"math"
 	"sort"

 	"github.com/prometheus/prometheus/pkg/labels"
@@ -23,23 +24,34 @@ import (
 )

 type listSeriesIterator struct {
-	samples []tsdbutil.Sample
+	samples Samples
 	idx     int
 }

+type samples []tsdbutil.Sample
+
+func (s samples) Get(i int) tsdbutil.Sample { return s[i] }
+func (s samples) Len() int                  { return len(s) }
+
+// Samples interface allows to work on arrays of types that are compatible with tsdbutil.Sample.
+type Samples interface {
+	Get(i int) tsdbutil.Sample
+	Len() int
+}
+
 // NewListSeriesIterator returns listSeriesIterator that allows to iterate over provided samples. Does not handle overlaps.
-func NewListSeriesIterator(samples []tsdbutil.Sample) chunkenc.Iterator {
+func NewListSeriesIterator(samples Samples) chunkenc.Iterator {
 	return &listSeriesIterator{samples: samples, idx: -1}
 }

 func (it *listSeriesIterator) At() (int64, float64) {
-	s := it.samples[it.idx]
+	s := it.samples.Get(it.idx)
 	return s.T(), s.V()
 }

 func (it *listSeriesIterator) Next() bool {
 	it.idx++
-	return it.idx < len(it.samples)
+	return it.idx < it.samples.Len()
 }

 func (it *listSeriesIterator) Seek(t int64) bool {
@@ -47,12 +59,12 @@ func (it *listSeriesIterator) Seek(t int64) bool {
 		it.idx = 0
 	}
 	// Do binary search between current position and end.
-	it.idx = sort.Search(len(it.samples)-it.idx, func(i int) bool {
-		s := it.samples[i+it.idx]
+	it.idx = sort.Search(it.samples.Len()-it.idx, func(i int) bool {
+		s := it.samples.Get(i + it.idx)
 		return s.T() >= t
 	})

-	return it.idx < len(it.samples)
+	return it.idx < it.samples.Len()
 }

 func (it *listSeriesIterator) Err() error { return nil }
@@ -84,7 +96,6 @@ type chunkSetToSeriesSet struct {
 	chkIterErr       error
 	sameSeriesChunks []Series
-	bufIterator      chunkenc.Iterator
 }

 // NewSeriesSetFromChunkSeriesSet converts ChunkSeriesSet to SeriesSet by decoding chunks one by one.
@@ -101,10 +112,9 @@ func (c *chunkSetToSeriesSet) Next() bool {
 	c.sameSeriesChunks = c.sameSeriesChunks[:0]

 	for iter.Next() {
-		c.sameSeriesChunks = append(c.sameSeriesChunks, &chunkToSeries{
+		c.sameSeriesChunks = append(c.sameSeriesChunks, &chunkToSeriesDecoder{
 			labels: c.ChunkSeriesSet.At().Labels(),
-			chk:    iter.At(),
-			buf:    c.bufIterator,
+			Meta:   iter.At(),
 		})
 	}
@@ -128,11 +138,82 @@ func (c *chunkSetToSeriesSet) Err() error {
 	return c.ChunkSeriesSet.Err()
 }

-type chunkToSeries struct {
-	labels labels.Labels
-	chk    chunks.Meta
-	buf    chunkenc.Iterator
-}
-
-func (s *chunkToSeries) Labels() labels.Labels       { return s.labels }
-func (s *chunkToSeries) Iterator() chunkenc.Iterator { return s.chk.Chunk.Iterator(s.buf) }

type chunkToSeriesDecoder struct {
	chunks.Meta

	labels labels.Labels
}

func (s *chunkToSeriesDecoder) Labels() labels.Labels { return s.labels }

// TODO(bwplotka): Can we provide any chunkenc buffer?
func (s *chunkToSeriesDecoder) Iterator() chunkenc.Iterator { return s.Chunk.Iterator(nil) }
type seriesSetToChunkSet struct {
SeriesSet
}
// NewSeriesSetToChunkSet converts SeriesSet to ChunkSeriesSet by encoding chunks from samples.
func NewSeriesSetToChunkSet(chk SeriesSet) ChunkSeriesSet {
return &seriesSetToChunkSet{SeriesSet: chk}
}
func (c *seriesSetToChunkSet) Next() bool {
if c.Err() != nil || !c.SeriesSet.Next() {
return false
}
return true
}
func (c *seriesSetToChunkSet) At() ChunkSeries {
return &seriesToChunkEncoder{
Series: c.SeriesSet.At(),
}
}
func (c *seriesSetToChunkSet) Err() error {
return c.SeriesSet.Err()
}
type seriesToChunkEncoder struct {
Series
}
// TODO(bwplotka): Currently encoder will just naively build one chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64)
seriesIter := s.Series.Iterator()
for seriesIter.Next() {
t, v := seriesIter.At()
app.Append(t, v)
maxt = t
if mint == math.MaxInt64 {
mint = t
}
}
if err := seriesIter.Err(); err != nil {
return errChunksIterator{err: err}
}
return NewListChunkSeriesIterator(chunks.Meta{
MinTime: mint,
MaxTime: maxt,
Chunk: chk,
})
}
type errChunksIterator struct {
err error
}
func (e errChunksIterator) At() chunks.Meta { return chunks.Meta{} }
func (e errChunksIterator) Next() bool { return false }
func (e errChunksIterator) Err() error { return e.err }
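A small sketch of the Samples indirection introduced above: any slice-backed type can feed the iterator by exposing Get and Len. Standalone stand-ins for tsdbutil.Sample and the storage types:

package main

import "fmt"

type sample struct {
    t int64
    v float64
}

func (s sample) T() int64   { return s.t }
func (s sample) V() float64 { return s.v }

// samples adapts a plain slice to the Get/Len access pattern the iterator needs.
type samples []sample

func (s samples) Get(i int) sample { return s[i] }
func (s samples) Len() int         { return len(s) }

func main() {
    ss := samples{{1, 1.5}, {2, 2.5}}
    for i := 0; i < ss.Len(); i++ {
        s := ss.Get(i)
        fmt.Println(s.T(), s.V())
    }
}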

View file

@@ -27,11 +27,11 @@ type MockSeries struct {
 	SampleIteratorFn func() chunkenc.Iterator
 }

-func NewListSeries(lset labels.Labels, samples []tsdbutil.Sample) *MockSeries {
+func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *MockSeries {
 	return &MockSeries{
 		labels: lset,
 		SampleIteratorFn: func() chunkenc.Iterator {
-			return NewListSeriesIterator(samples)
+			return NewListSeriesIterator(samples(s))
 		},
 	}
 }

View file

@@ -62,7 +62,10 @@ type IndexReader interface {
 	// beyond the lifetime of the index reader.
 	Symbols() index.StringIter

-	// LabelValues returns sorted possible label values.
+	// SortedLabelValues returns sorted possible label values.
+	SortedLabelValues(name string) ([]string, error)
+
+	// LabelValues returns possible label values which may not be sorted.
 	LabelValues(name string) ([]string, error)

 	// Postings returns the postings list iterator for the label pairs.
@@ -419,6 +422,11 @@ func (r blockIndexReader) Symbols() index.StringIter {
 	return r.ir.Symbols()
 }

+func (r blockIndexReader) SortedLabelValues(name string) ([]string, error) {
+	st, err := r.ir.SortedLabelValues(name)
+	return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
+}
+
 func (r blockIndexReader) LabelValues(name string) ([]string, error) {
 	st, err := r.ir.LabelValues(name)
 	return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
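One plausible reading of the SortedLabelValues/LabelValues split above: implementations may return values unsorted, and callers that need order can layer sorting on top. A hypothetical reader illustrating that layering, not the tsdb package's actual implementation:

package main

import (
    "fmt"
    "sort"
)

type reader struct{ vals []string }

// LabelValues may return values in any order.
func (r reader) LabelValues(name string) ([]string, error) {
    return r.vals, nil
}

// SortedLabelValues sorts on top of LabelValues.
func (r reader) SortedLabelValues(name string) ([]string, error) {
    vals, err := r.LabelValues(name)
    if err != nil {
        return nil, err
    }
    sort.Strings(vals)
    return vals, nil
}

func main() {
    r := reader{vals: []string{"b", "a", "c"}}
    fmt.Println(r.SortedLabelValues("job")) // [a b c] <nil>
}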

View file

@@ -203,9 +203,7 @@ func TestCorruptedChunk(t *testing.T) {
 			querier, err := NewBlockQuerier(b, 0, 1)
 			testutil.Ok(t, err)
 			defer func() { testutil.Ok(t, querier.Close()) }()
-			set, ws, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-			testutil.Ok(t, err)
-			testutil.Equals(t, 0, len(ws))
+			set := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

 			// Check query err.
 			testutil.Equals(t, false, set.Next())

View file

@@ -41,7 +41,10 @@

 package chunkenc

-import "io"
+import (
+	"encoding/binary"
+	"io"
+)

 // bstream is a stream of bits.
 type bstream struct {
@@ -49,10 +52,6 @@ type bstream struct {
 	count uint8 // how many bits are valid in current byte
 }

-func newBReader(b []byte) bstream {
-	return bstream{stream: b, count: 8}
-}
-
 func (b *bstream) bytes() []byte {
 	return b.stream
 }
@@ -111,90 +110,131 @@ func (b *bstream) writeBits(u uint64, nbits int) {
 	}
 }

-func (b *bstream) readBit() (bit, error) {
-	if len(b.stream) == 0 {
-		return false, io.EOF
-	}
-
-	if b.count == 0 {
-		b.stream = b.stream[1:]
-
-		if len(b.stream) == 0 {
-			return false, io.EOF
-		}
-		b.count = 8
-	}
-
-	d := (b.stream[0] << (8 - b.count)) & 0x80
-	b.count--
-	return d != 0, nil
-}
-
-func (b *bstream) ReadByte() (byte, error) {
-	return b.readByte()
-}
-
-func (b *bstream) readByte() (byte, error) {
-	if len(b.stream) == 0 {
-		return 0, io.EOF
-	}
-
-	if b.count == 0 {
-		b.stream = b.stream[1:]
-
-		if len(b.stream) == 0 {
-			return 0, io.EOF
-		}
-		return b.stream[0], nil
-	}
-
-	if b.count == 8 {
-		b.count = 0
-		return b.stream[0], nil
-	}
-
-	byt := b.stream[0] << (8 - b.count)
-	b.stream = b.stream[1:]
-
-	if len(b.stream) == 0 {
-		return 0, io.EOF
-	}
-
-	// We just advanced the stream and can assume the shift to be 0.
-	byt |= b.stream[0] >> b.count
-
-	return byt, nil
-}
-
-func (b *bstream) readBits(nbits int) (uint64, error) {
-	var u uint64
-
-	for nbits >= 8 {
-		byt, err := b.readByte()
-		if err != nil {
-			return 0, err
-		}
-
-		u = (u << 8) | uint64(byt)
-		nbits -= 8
-	}
-
-	if nbits == 0 {
-		return u, nil
-	}
-
-	if nbits > int(b.count) {
-		u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count))
-		nbits -= int(b.count)
-		b.stream = b.stream[1:]
-
-		if len(b.stream) == 0 {
-			return 0, io.EOF
-		}
-		b.count = 8
-	}
-
-	u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits)))
-	b.count -= uint8(nbits)
-	return u, nil
-}
+type bstreamReader struct {
+	stream       []byte
+	streamOffset int // The offset from which to read the next byte from the stream.
+
+	buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which to read bits.
+	valid  uint8  // The number of bits valid to read (from left) in the current buffer.
+}
+
+func newBReader(b []byte) bstreamReader {
+	return bstreamReader{
+		stream: b,
+	}
+}
+
+func (b *bstreamReader) readBit() (bit, error) {
+	if b.valid == 0 {
+		if !b.loadNextBuffer(1) {
+			return false, io.EOF
+		}
+	}
+
+	return b.readBitFast()
+}
+
+// readBitFast is like readBit but can return io.EOF if the internal buffer is empty.
+// If it returns io.EOF, the caller should retry reading bits by calling readBit().
+// This function must be kept small and a leaf in order to help the compiler inline it
+// and further improve performance.
+func (b *bstreamReader) readBitFast() (bit, error) {
+	if b.valid == 0 {
+		return false, io.EOF
+	}
+
+	b.valid--
+	bitmask := uint64(1) << b.valid
+	return (b.buffer & bitmask) != 0, nil
+}
+
+func (b *bstreamReader) readBits(nbits uint8) (uint64, error) {
+	if b.valid == 0 {
+		if !b.loadNextBuffer(nbits) {
+			return 0, io.EOF
+		}
+	}
+
+	if nbits <= b.valid {
+		return b.readBitsFast(nbits)
+	}
+
+	// We have to read all remaining valid bits from the current buffer and a part from the next one.
+	bitmask := (uint64(1) << b.valid) - 1
+	nbits -= b.valid
+	v := (b.buffer & bitmask) << nbits
+	b.valid = 0
+
+	if !b.loadNextBuffer(nbits) {
+		return 0, io.EOF
+	}
+
+	bitmask = (uint64(1) << nbits) - 1
+	v = v | ((b.buffer >> (b.valid - nbits)) & bitmask)
+	b.valid -= nbits
+
+	return v, nil
+}
+
+// readBitsFast is like readBits but can return io.EOF if the internal buffer is empty.
+// If it returns io.EOF, the caller should retry reading bits by calling readBits().
+// This function must be kept small and a leaf in order to help the compiler inline it
+// and further improve performance.
+func (b *bstreamReader) readBitsFast(nbits uint8) (uint64, error) {
+	if nbits > b.valid {
+		return 0, io.EOF
+	}
+
+	bitmask := (uint64(1) << nbits) - 1
+	b.valid -= nbits
+
+	return (b.buffer >> b.valid) & bitmask, nil
+}
+
+func (b *bstreamReader) ReadByte() (byte, error) {
+	v, err := b.readBits(8)
+	if err != nil {
+		return 0, err
+	}
+	return byte(v), nil
+}
+
+// loadNextBuffer loads the next bytes from the stream into the internal buffer.
+// The input nbits is the minimum number of bits that must be read, but the implementation
+// can read more (if possible) to improve performance.
+func (b *bstreamReader) loadNextBuffer(nbits uint8) bool {
+	if b.streamOffset >= len(b.stream) {
+		return false
+	}
+
+	// Handle the case where there are more than 8 bytes left in the stream (the most common case)
+	// in an optimized way. It's guaranteed that this branch will never read from the
+	// very last byte of the stream (which suffers race conditions due to concurrent
+	// writes).
+	if b.streamOffset+8 < len(b.stream) {
+		b.buffer = binary.BigEndian.Uint64(b.stream[b.streamOffset:])
+		b.streamOffset += 8
+		b.valid = 64
+		return true
+	}
+
+	// We're here if there are 8 or fewer bytes left in the stream. Since this reader needs
+	// to handle race conditions with concurrent writes happening on the very last byte,
+	// we make sure to never read more than the minimum requested bits (rounded up to
+	// the next byte). The following code is slower but called less frequently.
+	nbytes := int((nbits / 8) + 1)
+	if b.streamOffset+nbytes > len(b.stream) {
+		nbytes = len(b.stream) - b.streamOffset
+	}
+
+	buffer := uint64(0)
+	for i := 0; i < nbytes; i++ {
+		buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1)))
+	}
+
+	b.buffer = buffer
+	b.streamOffset += nbytes
+	b.valid = uint8(nbytes * 8)
+
+	return true
+}

View file

@@ -0,0 +1,61 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunkenc
import (
"testing"
"github.com/prometheus/prometheus/util/testutil"
)
func TestBstreamReader(t *testing.T) {
// Write to the bit stream.
w := bstream{}
for _, bit := range []bit{true, false} {
w.writeBit(bit)
}
for nbits := 1; nbits <= 64; nbits++ {
w.writeBits(uint64(nbits), nbits)
}
for v := 1; v < 10000; v += 123 {
w.writeBits(uint64(v), 29)
}
// Read back.
r := newBReader(w.bytes())
for _, bit := range []bit{true, false} {
v, err := r.readBitFast()
if err != nil {
v, err = r.readBit()
}
testutil.Ok(t, err)
testutil.Equals(t, bit, v)
}
for nbits := uint8(1); nbits <= 64; nbits++ {
v, err := r.readBitsFast(nbits)
if err != nil {
v, err = r.readBits(nbits)
}
testutil.Ok(t, err)
testutil.Equals(t, uint64(nbits), v, "nbits=%d", nbits)
}
for v := 1; v < 10000; v += 123 {
actual, err := r.readBitsFast(29)
if err != nil {
actual, err = r.readBits(29)
}
testutil.Ok(t, err)
testutil.Equals(t, uint64(v), actual, "v=%d", v)
}
}

View file

@@ -240,7 +240,7 @@ func (a *xorAppender) writeVDelta(v float64) {
}

type xorIterator struct {
-	br       bstream
+	br       bstreamReader
	numTotal uint16
	numRead  uint16
@@ -328,7 +328,10 @@ func (it *xorIterator) Next() bool {
		// read delta-of-delta
		for i := 0; i < 4; i++ {
			d <<= 1
-			bit, err := it.br.readBit()
+			bit, err := it.br.readBitFast()
+			if err != nil {
+				bit, err = it.br.readBit()
+			}
			if err != nil {
				it.err = err
				return false
@@ -350,6 +353,7 @@ func (it *xorIterator) Next() bool {
	case 0x0e:
		sz = 20
	case 0x0f:
+		// Do not use fast because it's very unlikely it will succeed.
		bits, err := it.br.readBits(64)
		if err != nil {
			it.err = err
@@ -360,7 +364,10 @@ func (it *xorIterator) Next() bool {
	}

	if sz != 0 {
-		bits, err := it.br.readBits(int(sz))
+		bits, err := it.br.readBitsFast(sz)
+		if err != nil {
+			bits, err = it.br.readBits(sz)
+		}
		if err != nil {
			it.err = err
			return false
@@ -379,7 +386,10 @@ func (it *xorIterator) Next() bool {
}

func (it *xorIterator) readValue() bool {
-	bit, err := it.br.readBit()
+	bit, err := it.br.readBitFast()
+	if err != nil {
+		bit, err = it.br.readBit()
+	}
	if err != nil {
		it.err = err
		return false
@@ -388,7 +398,10 @@ func (it *xorIterator) readValue() bool {
	if bit == zero {
		// it.val = it.val
	} else {
-		bit, err := it.br.readBit()
+		bit, err := it.br.readBitFast()
+		if err != nil {
+			bit, err = it.br.readBit()
+		}
		if err != nil {
			it.err = err
			return false
@@ -397,14 +410,20 @@ func (it *xorIterator) readValue() bool {
			// reuse leading/trailing zero bits
			// it.leading, it.trailing = it.leading, it.trailing
		} else {
-			bits, err := it.br.readBits(5)
+			bits, err := it.br.readBitsFast(5)
+			if err != nil {
+				bits, err = it.br.readBits(5)
+			}
			if err != nil {
				it.err = err
				return false
			}
			it.leading = uint8(bits)

-			bits, err = it.br.readBits(6)
+			bits, err = it.br.readBitsFast(6)
+			if err != nil {
+				bits, err = it.br.readBits(6)
+			}
			if err != nil {
				it.err = err
				return false
@@ -417,14 +436,17 @@ func (it *xorIterator) readValue() bool {
			it.trailing = 64 - it.leading - mbits
		}

-		mbits := int(64 - it.leading - it.trailing)
-		bits, err := it.br.readBits(mbits)
+		mbits := 64 - it.leading - it.trailing
+		bits, err := it.br.readBitsFast(mbits)
+		if err != nil {
+			bits, err = it.br.readBits(mbits)
+		}
		if err != nil {
			it.err = err
			return false
		}
		vbits := math.Float64bits(it.val)
-		vbits ^= (bits << it.trailing)
+		vbits ^= bits << it.trailing
		it.val = math.Float64frombits(vbits)
	}

View file

@@ -190,23 +190,23 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
	lastSeq := chkFileIndices[0]
	for _, seq := range chkFileIndices[1:] {
		if seq != lastSeq+1 {
-			return errors.Errorf("found unsequential head chunk files %d and %d", lastSeq, seq)
+			return errors.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq)
		}
		lastSeq = seq
	}

	for i, b := range cdm.mmappedChunkFiles {
		if b.byteSlice.Len() < HeadChunkFileHeaderSize {
-			return errors.Wrapf(errInvalidSize, "invalid head chunk file header in file %d", i)
+			return errors.Wrapf(errInvalidSize, "%s: invalid head chunk file header", files[i])
		}
		// Verify magic number.
		if m := binary.BigEndian.Uint32(b.byteSlice.Range(0, MagicChunksSize)); m != MagicHeadChunks {
-			return errors.Errorf("invalid magic number %x", m)
+			return errors.Errorf("%s: invalid magic number %x", files[i], m)
		}
		// Verify chunk format version.
		if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
-			return errors.Errorf("invalid chunk format version %d", v)
+			return errors.Errorf("%s: invalid chunk format version %d", files[i], v)
		}

		cdm.size += int64(b.byteSlice.Len())

View file

@@ -493,10 +493,10 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
		sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric })

		for i, pc := range postingInfos {
-			fmt.Printf("%d %s\n", pc.metric, pc.key)
			if i >= limit {
				break
			}
+			fmt.Printf("%d %s\n", pc.metric, pc.key)
		}
	}
@@ -556,7 +556,7 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
	postingInfos = postingInfos[:0]
	for _, n := range allLabelNames {
-		values, err := ir.LabelValues(n)
+		values, err := ir.SortedLabelValues(n)
		if err != nil {
			return err
		}
@@ -572,7 +572,7 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
	postingInfos = postingInfos[:0]
	for _, n := range allLabelNames {
-		lv, err := ir.LabelValues(n)
+		lv, err := ir.SortedLabelValues(n)
		if err != nil {
			return err
		}
@@ -582,7 +582,7 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
-	lv, err := ir.LabelValues("__name__")
+	lv, err := ir.SortedLabelValues("__name__")
	if err != nil {
		return err
	}
@@ -617,18 +617,7 @@ func dumpSamples(db *tsdb.DBReadOnly, mint, maxt int64) (err error) {
		err = merr.Err()
	}()

-	ss, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
-	if err != nil {
-		return err
-	}
-	if len(ws) > 0 {
-		var merr tsdb_errors.MultiError
-		for _, w := range ws {
-			merr.Add(w)
-		}
-		return merr.Err()
-	}
+	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))

	for ss.Next() {
		series := ss.At()
@@ -643,6 +632,14 @@ func dumpSamples(db *tsdb.DBReadOnly, mint, maxt int64) (err error) {
		}
	}

+	if ws := ss.Warnings(); len(ws) > 0 {
+		var merr tsdb_errors.MultiError
+		for _, w := range ws {
+			merr.Add(w)
+		}
+		return merr.Err()
+	}
+
	if ss.Err() != nil {
		return ss.Err()
	}

View file

@@ -39,11 +39,11 @@ import (
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
+	"golang.org/x/sync/errgroup"

	// Load the package into main to make sure minimum Go version is met.
	_ "github.com/prometheus/prometheus/tsdb/goversion"
-	"github.com/prometheus/prometheus/tsdb/wal"
-	"golang.org/x/sync/errgroup"
)

const (
@@ -420,6 +420,11 @@ func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	return dbWritable.Querier(ctx, mint, maxt)
}

+func (db *DBReadOnly) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
+	// TODO(bwplotka): Implement in next PR.
+	return nil, errors.New("not implemented")
+}
+
// Blocks returns a slice of block readers for persisted blocks.
func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
	select {
@@ -441,10 +446,14 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
	if len(corrupted) > 0 {
		for _, b := range loadable {
			if err := b.Close(); err != nil {
-				level.Warn(db.logger).Log("msg", "Closing a block", err)
+				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b)
			}
		}
-		return nil, errors.Errorf("unexpected corrupted block:%v", corrupted)
+		var merr tsdb_errors.MultiError
+		for ulid, err := range corrupted {
+			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
+		}
+		return nil, merr.Err()
	}

	if len(loadable) == 0 {
@@ -880,7 +889,11 @@ func (db *DB) reload() (err error) {
				block.Close()
			}
		}
-		return fmt.Errorf("unexpected corrupted block:%v", corrupted)
+		var merr tsdb_errors.MultiError
+		for ulid, err := range corrupted {
+			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
+		}
+		return merr.Err()
	}

	// All deletable blocks should not be loaded.
@@ -1054,7 +1067,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
	for ulid, block := range blocks {
		if block != nil {
			if err := block.Close(); err != nil {
-				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err)
+				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", ulid)
			}
		}
		if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil {
@@ -1337,6 +1350,11 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
	}, nil
}

+func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
+	// TODO(bwplotka): Implement in next PR.
+	return nil, errors.New("not implemented")
+}
+
func rangeForTimestamp(t int64, width int64) (maxt int64) {
	return (t/width)*width + width
}

View file

@@ -67,15 +67,12 @@ func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB, close func()) {

// query runs a matcher query against the querier and fully expands its data.
func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
-	ss, ws, err := q.Select(false, nil, matchers...)
+	ss := q.Select(false, nil, matchers...)
	defer func() {
		testutil.Ok(t, q.Close())
	}()
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))

	result := map[string][]tsdbutil.Sample{}
	for ss.Next() {
		series := ss.At()
@@ -95,6 +92,7 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
		result[name] = samples
	}
	testutil.Ok(t, ss.Err())
+	testutil.Equals(t, 0, len(ss.Warnings()))

	return result
}
@@ -315,9 +313,7 @@ Outer:
		q, err := db.Querier(context.TODO(), 0, numSamples)
		testutil.Ok(t, err)

-		res, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
@@ -333,6 +329,7 @@ Outer:
			testutil.Equals(t, eok, rok)

			if !eok {
+				testutil.Equals(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := expss.At()
@@ -491,10 +488,7 @@ func TestDB_Snapshot(t *testing.T) {
	defer func() { testutil.Ok(t, querier.Close()) }()

	// sum values
-	seriesSet, ws, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
+	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	sum := 0.0
	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
@@ -505,6 +499,7 @@ func TestDB_Snapshot(t *testing.T) {
		testutil.Ok(t, series.Err())
	}
	testutil.Ok(t, seriesSet.Err())
+	testutil.Equals(t, 0, len(seriesSet.Warnings()))
	testutil.Equals(t, 1000.0, sum)
}
@@ -546,10 +541,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
	defer func() { testutil.Ok(t, querier.Close()) }()

	// Sum values.
-	seriesSet, ws, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
+	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	sum := 0.0
	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
@@ -560,6 +552,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
		testutil.Ok(t, series.Err())
	}
	testutil.Ok(t, seriesSet.Err())
+	testutil.Equals(t, 0, len(seriesSet.Warnings()))

	// Since we snapshotted with MaxTime - 10, we expect 10 fewer samples.
	testutil.Equals(t, 1000.0-10, sum)
@@ -618,9 +611,7 @@ Outer:
		testutil.Ok(t, err)
		defer func() { testutil.Ok(t, q.Close()) }()

-		res, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
@@ -641,6 +632,7 @@ Outer:
			testutil.Equals(t, eok, rok)

			if !eok {
+				testutil.Equals(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := expss.At()
@@ -792,10 +784,7 @@ func TestDB_e2e(t *testing.T) {
			q, err := db.Querier(context.TODO(), mint, maxt)
			testutil.Ok(t, err)

-			ss, ws, err := q.Select(false, nil, qry.ms...)
-			testutil.Ok(t, err)
-			testutil.Equals(t, 0, len(ws))
+			ss := q.Select(false, nil, qry.ms...)

			result := map[string][]tsdbutil.Sample{}

			for ss.Next() {
@@ -810,6 +799,7 @@ func TestDB_e2e(t *testing.T) {
			}

			testutil.Ok(t, ss.Err())
+			testutil.Equals(t, 0, len(ss.Warnings()))
			testutil.Equals(t, expected, result)

			q.Close()
@@ -968,9 +958,7 @@ func TestTombstoneClean(t *testing.T) {
		testutil.Ok(t, err)
		defer q.Close()

-		res, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))

		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
		for _, ts := range c.remaint {
@@ -1004,6 +992,7 @@ func TestTombstoneClean(t *testing.T) {
			testutil.Equals(t, errExp, errRes)
			testutil.Equals(t, smplExp, smplRes)
		}
+		testutil.Equals(t, 0, len(res.Warnings()))

		for _, b := range db.Blocks() {
			testutil.Equals(t, tombstones.NewMemTombstones(), b.tombstones)
@@ -1306,19 +1295,17 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
	defer func() { testutil.Ok(t, q.Close()) }()

	for _, c := range cases {
-		ss, ws, err := q.Select(false, nil, c.selector...)
+		ss := q.Select(false, nil, c.selector...)
+		lres, _, ws, err := expandSeriesSet(ss)
		testutil.Ok(t, err)
		testutil.Equals(t, 0, len(ws))
-
-		lres, _, err := expandSeriesSet(ss)
-		testutil.Ok(t, err)
		testutil.Equals(t, c.series, lres)
	}
}

// expandSeriesSet returns the raw labels in the order they are retrieved from
// the series set and the samples keyed by Labels().String().
-func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample, error) {
+func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample, storage.Warnings, error) {
	resultLabels := []labels.Labels{}
	resultSamples := map[string][]sample{}
	for ss.Next() {
@@ -1332,7 +1319,7 @@ func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, map[string][]sample, error) {
		resultLabels = append(resultLabels, series.Labels())
		resultSamples[series.Labels().String()] = samples
	}
-	return resultLabels, resultSamples, ss.Err()
+	return resultLabels, resultSamples, ss.Warnings(), ss.Err()
}

func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
@@ -2503,9 +2490,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
	defer func() { testutil.Ok(t, querier.Close()) }()

	// Sum the values.
-	seriesSet, ws, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
+	seriesSet := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))

	sum := 0.0
	for seriesSet.Next() {
@@ -2517,6 +2502,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
		testutil.Ok(t, series.Err())
	}
	testutil.Ok(t, seriesSet.Err())
+	testutil.Equals(t, 0, len(seriesSet.Warnings()))
	testutil.Equals(t, 1000.0, sum)
}
@@ -2568,11 +2554,11 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
			testutil.Ok(t, err)
			defer querier.Close()

-			ss, _, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+			ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+			_, seriesSet, ws, err := expandSeriesSet(ss)
			testutil.Ok(t, err)
-
-			_, seriesSet, err := expandSeriesSet(ss)
-			testutil.Ok(t, err)
+			testutil.Equals(t, 0, len(ws))
			values := map[float64]struct{}{}
			for _, series := range seriesSet {
				values[series[len(series)-1].v] = struct{}{}
@@ -2608,16 +2594,16 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
	defer querierAfterAddButBeforeCommit.Close()

	// None of the queriers should return anything after the Add but before the commit.
-	ss, _, err := querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	_, seriesSet, err := expandSeriesSet(ss)
+	ss := querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	_, seriesSet, ws, err := expandSeriesSet(ss)
	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
	testutil.Equals(t, map[string][]sample{}, seriesSet)

-	ss, _, err = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	_, seriesSet, err = expandSeriesSet(ss)
+	ss = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	_, seriesSet, ws, err = expandSeriesSet(ss)
	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
	testutil.Equals(t, map[string][]sample{}, seriesSet)

	// This commit is after the queriers are created, so should not be returned.
@@ -2625,17 +2611,17 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
	testutil.Ok(t, err)

	// Nothing returned for querier created before the Add.
-	ss, _, err = querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	_, seriesSet, err = expandSeriesSet(ss)
+	ss = querierBeforeAdd.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	_, seriesSet, ws, err = expandSeriesSet(ss)
	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
	testutil.Equals(t, map[string][]sample{}, seriesSet)

	// Series exists but has no samples for querier created after Add.
-	ss, _, err = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	_, seriesSet, err = expandSeriesSet(ss)
+	ss = querierAfterAddButBeforeCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	_, seriesSet, ws, err = expandSeriesSet(ss)
	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
	testutil.Equals(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet)

	querierAfterCommit, err := db.Querier(context.Background(), 0, 1000000)
@@ -2643,10 +2629,10 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
	defer querierAfterCommit.Close()

	// Samples are returned for querier created after Commit.
-	ss, _, err = querierAfterCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
-	testutil.Ok(t, err)
-	_, seriesSet, err = expandSeriesSet(ss)
+	ss = querierAfterCommit.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	_, seriesSet, ws, err = expandSeriesSet(ss)
	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
	testutil.Equals(t, map[string][]sample{`{foo="bar"}`: {{t: 0, v: 0}}}, seriesSet)
}
@@ -2872,3 +2858,94 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
	}
	testutil.Ok(t, r.Close())
}

// TestCompactHead ensures that the head compaction
// creates a block that is ready for loading and
// does not cause data loss.
// This test:
// * opens a storage;
// * appends values;
// * compacts the head; and
// * queries the db to ensure the samples are present from the compacted head.
func TestCompactHead(t *testing.T) {
	dbDir, err := ioutil.TempDir("", "testFlush")
	testutil.Ok(t, err)
	defer func() { testutil.Ok(t, os.RemoveAll(dbDir)) }()

	// Open a DB and append data to the WAL.
	tsdbCfg := &Options{
		RetentionDuration: int64(time.Hour * 24 * 15 / time.Millisecond),
		NoLockfile:        true,
		MinBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
		MaxBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
		WALCompression:    true,
	}

	db, err := Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg)
	testutil.Ok(t, err)
	app := db.Appender()
	var expSamples []sample
	maxt := 100
	for i := 0; i < maxt; i++ {
		val := rand.Float64()
		_, err := app.Add(labels.FromStrings("a", "b"), int64(i), val)
		testutil.Ok(t, err)
		expSamples = append(expSamples, sample{int64(i), val})
	}
	testutil.Ok(t, app.Commit())

	// Compact the Head to create a new block.
	testutil.Ok(t, db.CompactHead(NewRangeHead(db.Head(), 0, int64(maxt)-1)))
	testutil.Ok(t, db.Close())

	// Delete everything but the new block and
	// reopen the db to query it to ensure it includes the head data.
	testutil.Ok(t, deleteNonBlocks(db.Dir()))
	db, err = Open(dbDir, log.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg)
	testutil.Ok(t, err)
	testutil.Equals(t, 1, len(db.Blocks()))
	testutil.Equals(t, int64(maxt), db.Head().MinTime())
	defer func() { testutil.Ok(t, db.Close()) }()
	querier, err := db.Querier(context.Background(), 0, int64(maxt)-1)
	testutil.Ok(t, err)
	defer func() { testutil.Ok(t, querier.Close()) }()

	seriesSet := querier.Select(false, nil, &labels.Matcher{Type: labels.MatchEqual, Name: "a", Value: "b"})
	var actSamples []sample

	for seriesSet.Next() {
		series := seriesSet.At().Iterator()
		for series.Next() {
			time, val := series.At()
			actSamples = append(actSamples, sample{int64(time), val})
		}
		testutil.Ok(t, series.Err())
	}
	testutil.Equals(t, expSamples, actSamples)
	testutil.Ok(t, seriesSet.Err())
}

func deleteNonBlocks(dbDir string) error {
	dirs, err := ioutil.ReadDir(dbDir)
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		if ok := isBlockDir(dir); !ok {
			if err := os.RemoveAll(filepath.Join(dbDir, dir.Name())); err != nil {
				return err
			}
		}
	}
	dirs, err = ioutil.ReadDir(dbDir)
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		if ok := isBlockDir(dir); !ok {
			return errors.Errorf("root folder:%v still has non block directory:%v", dbDir, dir.Name())
		}
	}
	return nil
}

View file

@@ -2,5 +2,6 @@
* [Index](index.md)
* [Chunks](chunks.md)
+* [Head Chunks](head_chunks.md)
* [Tombstones](tombstones.md)
* [Wal](wal.md)

View file

@@ -1,7 +1,7 @@
# Head Chunks on Disk Format

-The following describes the format of a chunks file,
-which is created in the `wal/chunks/` inside the data directory.
+The following describes the format of a chunks file, which is created in the
+`chunks_head/` directory inside the data directory.

Chunks in the files are referenced from the index by uint64 composed of
in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes).
@@ -27,7 +27,10 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes).

# Chunk

-Unlike chunks in the on-disk blocks, here we additionally store series reference that the chunks belongs to and the mint/maxt of the chunks. This is because we don't have an index associated with these chunks, hence these meta information are used while replaying the chunks.
+Unlike chunks in the on-disk blocks, here we additionally store the series
+reference that the chunk belongs to and the mint/maxt of the chunks. This is
+because we don't have an index associated with these chunks, hence this meta
+information is used while replaying the chunks.

```
┌─────────────────────┬───────────────────────┬───────────────────────┬───────────────────┬───────────────┬──────────────┬────────────────┐
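As a worked example of the reference layout described above, packing and unpacking such a chunk reference could look like this (function names are illustrative, not taken from the codebase):

```go
// packChunkRef composes the uint64 chunk reference described above:
// segment sequence number in the upper 4 bytes, in-file offset in the
// lower 4 bytes.
func packChunkRef(seq, offset uint64) uint64 {
	return seq<<32 | offset&0xFFFFFFFF
}

// unpackChunkRef reverses packChunkRef.
func unpackChunkRef(ref uint64) (seq, offset uint64) {
	return ref >> 32, ref & 0xFFFFFFFF
}
```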

View file

@@ -1537,6 +1537,16 @@ func (h *headIndexReader) Symbols() index.StringIter {
	return index.NewStringListIter(res)
}

+// SortedLabelValues returns label values present in the head for the
+// specific label name that are within the time range mint to maxt.
+func (h *headIndexReader) SortedLabelValues(name string) ([]string, error) {
+	values, err := h.LabelValues(name)
+	if err == nil {
+		sort.Strings(values)
+	}
+	return values, err
+}
+
// LabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
func (h *headIndexReader) LabelValues(name string) ([]string, error) {
@@ -1552,7 +1562,6 @@ func (h *headIndexReader) LabelValues(name string) ([]string, error) {
		sl = append(sl, s)
	}
	h.head.symMtx.RUnlock()
-	sort.Strings(sl)

	return sl, nil
}

View file

@@ -562,9 +562,7 @@ func TestHeadDeleteSimple(t *testing.T) {
			for _, h := range []*Head{head, reloadedHead} {
				q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
				testutil.Ok(t, err)
-				actSeriesSet, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
-				testutil.Ok(t, err)
-				testutil.Equals(t, 0, len(ws))
+				actSeriesSet := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
				testutil.Ok(t, q.Close())
				expSeriesSet := newMockSeriesSet([]storage.Series{
					newSeries(map[string]string{lblDefault.Name: lblDefault.Value}, func() []tsdbutil.Sample {
@@ -583,6 +581,8 @@ func TestHeadDeleteSimple(t *testing.T) {
					if !eok {
						testutil.Ok(t, h.Close())
+						testutil.Ok(t, actSeriesSet.Err())
+						testutil.Equals(t, 0, len(actSeriesSet.Warnings()))
						continue Outer
					}
					expSeries := expSeriesSet.At()
@@ -623,13 +623,15 @@ func TestDeleteUntilCurMax(t *testing.T) {
	// Test the series returns no samples. The series is cleared only after compaction.
	q, err := NewBlockQuerier(hb, 0, 100000)
	testutil.Ok(t, err)
-	res, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
+	res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	testutil.Assert(t, res.Next(), "series is not present")
	s := res.At()
	it := s.Iterator()
	testutil.Assert(t, !it.Next(), "expected no samples")
+	for res.Next() {
+	}
+	testutil.Ok(t, res.Err())
+	testutil.Equals(t, 0, len(res.Warnings()))

	// Add again and test for presence.
	app = hb.Appender()
@@ -638,15 +640,17 @@ func TestDeleteUntilCurMax(t *testing.T) {
	testutil.Ok(t, app.Commit())
	q, err = NewBlockQuerier(hb, 0, 100000)
	testutil.Ok(t, err)
-	res, ws, err = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
+	res = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
	testutil.Assert(t, res.Next(), "series don't exist")
	exps := res.At()
	it = exps.Iterator()
	resSamples, err := expandSeriesIterator(it)
	testutil.Ok(t, err)
	testutil.Equals(t, []tsdbutil.Sample{sample{11, 1}}, resSamples)
+	for res.Next() {
+	}
+	testutil.Ok(t, res.Err())
+	testutil.Equals(t, 0, len(res.Warnings()))
}

func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
@@ -807,9 +811,7 @@ func TestDelete_e2e(t *testing.T) {
		q, err := NewBlockQuerier(hb, 0, 100000)
		testutil.Ok(t, err)
		defer q.Close()
-		ss, ws, err := q.Select(true, nil, del.ms...)
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		ss := q.Select(true, nil, del.ms...)
		// Build the mockSeriesSet.
		matchedSeries := make([]storage.Series, 0, len(matched))
		for _, m := range matched {
@@ -850,6 +852,8 @@ func TestDelete_e2e(t *testing.T) {
				testutil.Equals(t, errExp, errRes)
				testutil.Equals(t, smplExp, smplRes)
			}
+			testutil.Ok(t, ss.Err())
+			testutil.Equals(t, 0, len(ss.Warnings()))
		}
	}
}
@@ -1118,11 +1122,12 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
	testutil.Ok(t, err)
	defer q.Close()

-	ss, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
-
+	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
	testutil.Equals(t, true, ss.Next())
+	for ss.Next() {
+	}
+	testutil.Ok(t, ss.Err())
+	testutil.Equals(t, 0, len(ss.Warnings()))
}

func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
@@ -1148,11 +1153,9 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
	testutil.Ok(t, err)
	defer q.Close()

-	ss, ws, err := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
-	testutil.Ok(t, err)
-	testutil.Equals(t, 0, len(ws))
-
+	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
	testutil.Equals(t, false, ss.Next())
+	testutil.Equals(t, 0, len(ss.Warnings()))

	// Truncate again, this time the series should be deleted
	testutil.Ok(t, h.Truncate(2050))
@@ -1434,11 +1437,11 @@ func TestMemSeriesIsolation(t *testing.T) {
		testutil.Ok(t, err)
		defer querier.Close()

-		ss, _, err := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+		ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+		_, seriesSet, ws, err := expandSeriesSet(ss)
		testutil.Ok(t, err)
-
-		_, seriesSet, err := expandSeriesSet(ss)
-		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))

		for _, series := range seriesSet {
			return int(series[len(series)-1].v)
		}
@@ -1790,8 +1793,9 @@ func testHeadSeriesChunkRace(t *testing.T) {
		h.gc()
		wg.Done()
	}()
-	ss, _, err := q.Select(false, nil, matcher)
-	testutil.Ok(t, err)
+	ss := q.Select(false, nil, matcher)
+	for ss.Next() {
+	}
	testutil.Ok(t, ss.Err())
	wg.Wait()
}
@@ -1863,7 +1867,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
			testutil.Equals(t, tt.expectedNames, actualLabelNames)
			if len(tt.expectedValues) > 0 {
				for i, name := range expectedLabelNames {
-					actualLabelValue, err := headIdxReader.LabelValues(name)
+					actualLabelValue, err := headIdxReader.SortedLabelValues(name)
					testutil.Ok(t, err)
					testutil.Equals(t, []string{tt.expectedValues[i]}, actualLabelValue)
				}

View file

@@ -1418,6 +1418,17 @@ func (r *Reader) SymbolTableSize() uint64 {
	return uint64(r.symbols.Size())
}

+// SortedLabelValues returns value tuples that exist for the given label name.
+// It is not safe to use the return value beyond the lifetime of the byte slice
+// passed into the Reader.
+func (r *Reader) SortedLabelValues(name string) ([]string, error) {
+	values, err := r.LabelValues(name)
+	if err == nil && r.version == FormatV1 {
+		sort.Strings(values)
+	}
+	return values, err
+}
+
// LabelValues returns value tuples that exist for the given label name.
// It is not safe to use the return value beyond the lifetime of the byte slice
// passed into the Reader.
@@ -1431,7 +1442,6 @@ func (r *Reader) LabelValues(name string) ([]string, error) {
	for k := range e {
		values = append(values, k)
	}
-	sort.Strings(values)

	return values, nil
}

View file

@@ -453,7 +453,7 @@ func TestPersistence_index_e2e(t *testing.T) {
	for k, v := range labelPairs {
		sort.Strings(v)

-		res, err := ir.LabelValues(k)
+		res, err := ir.SortedLabelValues(k)
		testutil.Ok(t, err)

		testutil.Equals(t, len(v), len(res))

View file

@@ -85,9 +85,9 @@ func (q *querier) lvals(qs []storage.Querier, n string) ([]string, storage.Warnings, error) {
	return mergeStrings(s1, s2), ws, nil
}

-func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
	if len(q.blocks) == 0 {
-		return storage.EmptySeriesSet(), nil, nil
+		return storage.EmptySeriesSet()
	}
	if len(q.blocks) == 1 {
		// Sorting Head series is slow, and unneeded when only the
@@ -96,18 +96,12 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
	}

	ss := make([]storage.SeriesSet, len(q.blocks))
-	var ws storage.Warnings
	for i, b := range q.blocks {
		// We have to sort if blocks > 1 as MergedSeriesSet requires it.
-		s, w, err := b.Select(true, hints, ms...)
-		ws = append(ws, w...)
-		if err != nil {
-			return nil, ws, err
-		}
-		ss[i] = s
+		ss[i] = b.Select(true, hints, ms...)
	}

-	return NewMergedSeriesSet(ss), ws, nil
+	return NewMergedSeriesSet(ss)
}

func (q *querier) Close() error {
@@ -125,31 +119,23 @@ type verticalQuerier struct {
	querier
}

-func (q *verticalQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+func (q *verticalQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
	return q.sel(sortSeries, hints, q.blocks, ms)
}

-func (q *verticalQuerier) sel(sortSeries bool, hints *storage.SelectHints, qs []storage.Querier, ms []*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+func (q *verticalQuerier) sel(sortSeries bool, hints *storage.SelectHints, qs []storage.Querier, ms []*labels.Matcher) storage.SeriesSet {
	if len(qs) == 0 {
-		return storage.EmptySeriesSet(), nil, nil
+		return storage.EmptySeriesSet()
	}
	if len(qs) == 1 {
		return qs[0].Select(sortSeries, hints, ms...)
	}
	l := len(qs) / 2

-	var ws storage.Warnings
-	a, w, err := q.sel(sortSeries, hints, qs[:l], ms)
-	ws = append(ws, w...)
-	if err != nil {
-		return nil, ws, err
-	}
-	b, w, err := q.sel(sortSeries, hints, qs[l:], ms)
-	ws = append(ws, w...)
-	if err != nil {
-		return nil, ws, err
-	}
-	return newMergedVerticalSeriesSet(a, b), ws, nil
+	return newMergedVerticalSeriesSet(
+		q.sel(sortSeries, hints, qs[:l], ms),
+		q.sel(sortSeries, hints, qs[l:], ms),
+	)
}

// NewBlockQuerier returns a querier against the reader.
@@ -189,7 +175,7 @@ type blockQuerier struct {
	mint, maxt int64
}

-func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
	var base storage.DeprecatedChunkSeriesSet
	var err error

@@ -199,7 +185,7 @@ func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
		base, err = LookupChunkSeries(q.index, q.tombstones, ms...)
	}
	if err != nil {
-		return nil, nil, err
+		return storage.ErrSeriesSet(err)
	}

	mint := q.mint
@@ -218,11 +204,11 @@ func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
		mint: mint,
		maxt: maxt,
-	}, nil, nil
+	}
}

func (q *blockQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
-	res, err := q.index.LabelValues(name)
+	res, err := q.index.SortedLabelValues(name)
	return res, nil, err
}

@@ -407,9 +393,14 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
	}

	var res []string
+	lastVal, isSorted := "", true
	for _, val := range vals {
		if m.Matches(val) {
			res = append(res, val)
+			if isSorted && val < lastVal {
+				isSorted = false
+			}
+			lastVal = val
		}
	}

@@ -417,6 +408,9 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
		return index.EmptyPostings(), nil
	}

+	if !isSorted {
+		sort.Strings(res)
+	}
	return ix.Postings(m.Name, res...)
}

@@ -428,12 +422,20 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, error) {
	}

	var res []string
+	lastVal, isSorted := "", true
	for _, val := range vals {
		if !m.Matches(val) {
			res = append(res, val)
+			if isSorted && val < lastVal {
+				isSorted = false
+			}
+			lastVal = val
		}
	}

+	if !isSorted {
+		sort.Strings(res)
+	}
	return ix.Postings(m.Name, res...)
}
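The lastVal/isSorted bookkeeping added above keeps the previous guarantee — candidate values reach ix.Postings in ascending order — while skipping the sort whenever the index already returned the values sorted, which is the common case; sort.Strings now runs only on the rare unsorted path. The same idea in isolation (a sketch, not code from the tree; assumes the usual "sort" import):

```go
// filterKeepingOrder returns the values accepted by keep, in ascending
// order. It detects during the single filtering pass whether the input
// was already ordered and only sorts when it was not.
func filterKeepingOrder(vals []string, keep func(string) bool) []string {
	var res []string
	lastVal, isSorted := "", true
	for _, val := range vals {
		if !keep(val) {
			continue
		}
		if isSorted && val < lastVal {
			isSorted = false
		}
		lastVal = val
		res = append(res, val)
	}
	if !isSorted {
		sort.Strings(res)
	}
	return res
}
```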
@@ -501,6 +503,14 @@ func (s *mergedSeriesSet) Err() error {
	return s.err
}

+func (s *mergedSeriesSet) Warnings() storage.Warnings {
+	var ws storage.Warnings
+	for _, ss := range s.all {
+		ws = append(ws, ss.Warnings()...)
+	}
+	return ws
+}
+
// nextAll is to call Next() for all SeriesSet.
// Because the order of the SeriesSet slice will affect the results,
// we need to use a buffer slice to hold the order.
@@ -509,7 +519,10 @@ func (s *mergedSeriesSet) nextAll() {
	for _, ss := range s.all {
		if ss.Next() {
			s.buf = append(s.buf, ss)
-		} else if ss.Err() != nil {
+			continue
+		}
+
+		if ss.Err() != nil {
			s.done = true
			s.err = ss.Err()
			break
@@ -623,6 +636,13 @@ func (s *mergedVerticalSeriesSet) Err() error {
	return s.b.Err()
}

+func (s *mergedVerticalSeriesSet) Warnings() storage.Warnings {
+	var ws storage.Warnings
+	ws = append(ws, s.a.Warnings()...)
+	ws = append(ws, s.b.Warnings()...)
+	return ws
+}
+
func (s *mergedVerticalSeriesSet) compare() int {
	if s.adone {
		return 1
@@ -850,6 +870,7 @@ func (s *blockSeriesSet) Next() bool {
func (s *blockSeriesSet) At() storage.Series { return s.cur }
func (s *blockSeriesSet) Err() error         { return s.err }
+func (s *blockSeriesSet) Warnings() storage.Warnings { return nil }

// chunkSeries is a series that is backed by a sequence of chunks holding
// time series data.

View file

@@ -155,8 +155,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
-				ss, _, err := q.Select(sorted, nil, matcher)
-				testutil.Ok(b, err)
+				ss := q.Select(sorted, nil, matcher)
				for ss.Next() {
				}
				testutil.Ok(b, ss.Err())

View file

@@ -39,12 +39,14 @@ import (
type mockSeriesSet struct {
	next   func() bool
	series func() storage.Series
+	ws     func() storage.Warnings
	err    func() error
}

func (m *mockSeriesSet) Next() bool         { return m.next() }
func (m *mockSeriesSet) At() storage.Series { return m.series() }
func (m *mockSeriesSet) Err() error         { return m.err() }
+func (m *mockSeriesSet) Warnings() storage.Warnings { return m.ws() }

func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
	i := -1
@@ -57,11 +59,11 @@ func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
			return list[i]
		},
		err: func() error { return nil },
+		ws:  func() storage.Warnings { return nil },
	}
}

func TestMergedSeriesSet(t *testing.T) {
	cases := []struct {
		// The input sets in order (samples in series in b are strictly
		// after those in a).
@@ -373,15 +375,14 @@ Outer:
			maxt: c.maxt,
		}

-		res, ws, err := querier.Select(false, nil, c.ms...)
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		res := querier.Select(false, nil, c.ms...)

		for {
			eok, rok := c.exp.Next(), res.Next()
			testutil.Equals(t, eok, rok)

			if !eok {
+				testutil.Equals(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := c.exp.At()
@@ -536,15 +537,14 @@ Outer:
			maxt: c.maxt,
		}

-		res, ws, err := querier.Select(false, nil, c.ms...)
-		testutil.Ok(t, err)
-		testutil.Equals(t, 0, len(ws))
+		res := querier.Select(false, nil, c.ms...)

		for {
			eok, rok := c.exp.Next(), res.Next()
			testutil.Equals(t, eok, rok)

			if !eok {
+				testutil.Equals(t, 0, len(res.Warnings()))
				continue Outer
			}
			sexp := c.exp.At()
@@ -1400,6 +1400,12 @@ func (m mockIndex) Close() error {
	return nil
}

+func (m mockIndex) SortedLabelValues(name string) ([]string, error) {
+	values, _ := m.LabelValues(name)
+	sort.Strings(values)
+	return values, nil
+}
+
func (m mockIndex) LabelValues(name string) ([]string, error) {
	values := []string{}
	for l := range m.postings {
@@ -1407,7 +1413,6 @@ func (m mockIndex) LabelValues(name string) ([]string, error) {
			values = append(values, l.Value)
		}
	}
-	sort.Strings(values)
	return values, nil
}
@@ -1654,7 +1659,7 @@ func BenchmarkQuerySeek(b *testing.B) {
			b.ResetTimer()
			b.ReportAllocs()

-			ss, ws, err := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+			ss := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
			for ss.Next() {
				it := ss.At().Iterator()
				for t := mint; t <= maxt; t++ {
@@ -1664,7 +1669,7 @@ func BenchmarkQuerySeek(b *testing.B) {
			}
			testutil.Ok(b, ss.Err())
			testutil.Ok(b, err)
-			testutil.Equals(b, 0, len(ws))
+			testutil.Equals(b, 0, len(ss.Warnings()))
		})
	}
}
@@ -1792,9 +1797,11 @@ func BenchmarkSetMatcher(b *testing.B) {
			b.ResetTimer()
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
-				_, ws, err := que.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
-				testutil.Ok(b, err)
-				testutil.Equals(b, 0, len(ws))
+				ss := que.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
+				for ss.Next() {
+				}
+				testutil.Ok(b, ss.Err())
+				testutil.Equals(b, 0, len(ss.Warnings()))
			}
		})
	}
@@ -2252,9 +2259,7 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
-		ss, ws, err := q.Select(false, nil, selectors...)
-		testutil.Ok(b, err)
-		testutil.Equals(b, 0, len(ws))
+		ss := q.Select(false, nil, selectors...)
		var actualExpansions int
		for ss.Next() {
			s := ss.At()
@@ -2264,6 +2269,8 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
			}
			actualExpansions++
		}
+		testutil.Ok(b, ss.Err())
+		testutil.Equals(b, 0, len(ss.Warnings()))
		testutil.Equals(b, expExpansions, actualExpansions)
		testutil.Ok(b, ss.Err())
	}
@@ -2276,6 +2283,11 @@ func (m mockMatcherIndex) Symbols() index.StringIter { return nil }

func (m mockMatcherIndex) Close() error { return nil }

+// SortedLabelValues will return error if it is called.
+func (m mockMatcherIndex) SortedLabelValues(name string) ([]string, error) {
+	return []string{}, errors.New("sorted label values called")
+}
+
// LabelValues will return error if it is called.
func (m mockMatcherIndex) LabelValues(name string) ([]string, error) {
	return []string{}, errors.New("label values called")

1
vendor/github.com/digitalocean/godo/.gitignore generated vendored Normal file
View file

@@ -0,0 +1 @@
vendor/

8
vendor/github.com/digitalocean/godo/.whitesource generated vendored Normal file
View file

@@ -0,0 +1,8 @@
{
  "checkRunSettings": {
    "vulnerableCheckRunConclusionLevel": "failure"
  },
  "issueSettings": {
    "minSeverityLevel": "LOW"
  }
}

52
vendor/github.com/digitalocean/godo/1-click.go generated vendored Normal file
View file

@@ -0,0 +1,52 @@
package godo

import (
	"context"
	"fmt"
	"net/http"
)

const oneClickBasePath = "v2/1-clicks"

// OneClickService is an interface for interacting with 1-clicks with the
// DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2/#1-click-applications
type OneClickService interface {
	List(context.Context, string) ([]*OneClick, *Response, error)
}

var _ OneClickService = &OneClickServiceOp{}

// OneClickServiceOp interfaces with 1-click endpoints in the DigitalOcean API.
type OneClickServiceOp struct {
	client *Client
}

// OneClick is the structure of a 1-click
type OneClick struct {
	Slug string `json:"slug"`
	Type string `json:"type"`
}

// OneClicksRoot is the root of the json payload that contains a list of 1-clicks
type OneClicksRoot struct {
	List []*OneClick `json:"1_clicks"`
}

// List returns a list of the available 1-click applications.
func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]*OneClick, *Response, error) {
	path := fmt.Sprintf(`%s?type=%s`, oneClickBasePath, oneClickType)
	req, err := ocs.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(OneClicksRoot)
	resp, err := ocs.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.List, resp, nil
}
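A hedged usage sketch for the new service (NewFromToken is the convenience constructor the changelog below records; the OneClick field name on godo.Client and the token are assumptions here):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token; supply a real DigitalOcean API token.
	client := godo.NewFromToken("my-digitalocean-api-token")

	// List the available 1-click applications of a given type.
	oneClicks, _, err := client.OneClick.List(context.Background(), "kubernetes")
	if err != nil {
		log.Fatal(err)
	}
	for _, oc := range oneClicks {
		fmt.Printf("%s (%s)\n", oc.Slug, oc.Type)
	}
}
```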

255
vendor/github.com/digitalocean/godo/CHANGELOG.md generated vendored Normal file
View file

@@ -0,0 +1,255 @@
# Change Log
## [v1.37.0] - 2020-06-01
- #336 registry: URL encode repository names when building URLs. @adamwg
- #335 Add 1-click service and request. @scottcrawford03
## [v1.36.0] - 2020-05-12
- #331 Expose expiry_seconds for Registry.DockerCredentials. @andrewsomething
## [v1.35.1] - 2020-04-21
- #328 Update vulnerable x/crypto dependency - @bentranter
## [v1.35.0] - 2020-04-20
- #326 Add TagCount field to registry/Repository - @nicktate
- #325 Add DOCR EA routes - @nicktate
- #324 Upgrade godo to Go 1.14 - @bentranter
## [v1.34.0] - 2020-03-30
- #320 Add VPC v3 attributes - @viola
## [v1.33.1] - 2020-03-23
- #318 upgrade github.com/stretchr/objx past 0.1.1 - @hilary
## [v1.33.0] - 2020-03-20
- #310 Add BillingHistory service and List endpoint - @rbutler
- #316 load balancers: add new enable_backend_keepalive field - @anitgandhi
## [v1.32.0] - 2020-03-04
- #311 Add reset database user auth method - @zbarahal-do
## [v1.31.0] - 2020-02-28
- #305 invoices: GetPDF and GetCSV methods - @rbutler
- #304 Add NewFromToken convenience method to init client - @bentranter
- #301 invoices: Get, Summary, and List methods - @rbutler
- #299 Fix param expiry_seconds for kubernetes.GetCredentials request - @velp
## [v1.30.0] - 2020-02-03
- #295 registry: support the created_at field - @adamwg
- #293 doks: node pool labels - @snormore
## [v1.29.0] - 2019-12-13
- #288 Add Balance Get method - @rbutler
- #286,#289 Deserialize meta field - @timoreimann
## [v1.28.0] - 2019-12-04
- #282 Add valid Redis eviction policy constants - @bentranter
- #281 Remove databases info from top-level godoc string - @bentranter
- #280 Fix VolumeSnapshotResourceType value volumesnapshot -> volume_snapshot - @aqche
## [v1.27.0] - 2019-11-18
- #278 add mysql user auth settings for database users - @gregmankes
## [v1.26.0] - 2019-11-13
- #272 dbaas: get and set mysql sql mode - @mikejholly
## [v1.25.0] - 2019-11-13
- #275 registry/docker-credentials: add support for the read/write parameter - @kamaln7
- #273 implement the registry/docker-credentials endpoint - @kamaln7
- #271 Add registry resource - @snormore
## [v1.24.1] - 2019-11-04
- #264 Update isLast to check p.Next - @aqche
## [v1.24.0] - 2019-10-30
- #267 Return []DatabaseFirewallRule in addition to raw response. - @andrewsomething
## [v1.23.1] - 2019-10-30
- #265 add support for getting/setting firewall rules - @gregmankes
- #262 remove ResolveReference call - @mdanzinger
- #261 Update CONTRIBUTING.md - @mdanzinger
## [v1.22.0] - 2019-09-24
- #259 Add Kubernetes GetCredentials method - @snormore
## [v1.21.1] - 2019-09-19
- #257 Upgrade to Go 1.13 - @bentranter
## [v1.21.0] - 2019-09-16
- #255 Add DropletID to Kubernetes Node instance - @snormore
- #254 Add tags to Database, DatabaseReplica - @Zyqsempai
## [v1.20.0] - 2019-09-06
- #252 Add Kubernetes autoscale config fields - @snormore
- #251 Support unset fields on Kubernetes cluster and node pool updates - @snormore
- #250 Add Kubernetes GetUser method - @snormore
## [v1.19.0] - 2019-07-19
- #244 dbaas: add private-network-uuid field to create request
## [v1.18.0] - 2019-07-17
- #241 Databases: support for custom VPC UUID on migrate @mikejholly
- #240 Add the ability to get URN for a Database @stack72
- #236 Fix omitempty typos in JSON struct tags @amccarthy1
## [v1.17.0] - 2019-06-21
- #238 Add support for Redis eviction policy in Databases @mikejholly
## [v1.16.0] - 2019-06-04
- #233 Add Kubernetes DeleteNode method, deprecate RecycleNodePoolNodes @bouk
## [v1.15.0] - 2019-05-13
- #231 Add private connection fields to Databases - @mikejholly
- #223 Introduce Go modules - @andreiavrammsd
## [v1.14.0] - 2019-05-13
- #229 Add support for upgrading Kubernetes clusters - @adamwg
## [v1.13.0] - 2019-04-19
- #213 Add tagging support for volume snapshots - @jcodybaker
## [v1.12.0] - 2019-04-18
- #224 Add maintenance window support for Kubernetes- @fatih
## [v1.11.1] - 2019-04-04
- #222 Fix Create Database Pools json fields - @sunny-b
## [v1.11.0] - 2019-04-03
- #220 roll out vpc functionality - @jheimann
## [v1.10.1] - 2019-03-27
- #219 Fix Database Pools json field - @sunny-b
## [v1.10.0] - 2019-03-20
- #215 Add support for Databases - @mikejholly
## [v1.9.0] - 2019-03-18
- #214 add support for enable_proxy_protocol. - @mregmi
## [v1.8.0] - 2019-03-13
- #210 Expose tags on storage volume create/list/get. - @jcodybaker
## [v1.7.5] - 2019-03-04
- #207 Add support for custom subdomains for Spaces CDN [beta] - @xornivore
## [v1.7.4] - 2019-02-08
- #202 Allow tagging volumes - @mchitten
## [v1.7.3] - 2018-12-18
- #196 Expose tag support for creating Load Balancers.
## [v1.7.2] - 2018-12-04
- #192 Exposes more options for Kubernetes clusters.
## [v1.7.1] - 2018-11-27
- #190 Expose constants for the state of Kubernetes clusters.
## [v1.7.0] - 2018-11-13
- #188 Kubernetes support [beta] - @aybabtme
## [v1.6.0] - 2018-10-16
- #185 Projects support [beta] - @mchitten
## [v1.5.0] - 2018-10-01
- #181 Adding tagging images support - @hugocorbucci
## [v1.4.2] - 2018-08-30
- #178 Allowing creating domain records with weight of 0 - @TFaga
- #177 Adding `VolumeLimit` to account - @lxfontes
## [v1.4.1] - 2018-08-23
- #176 Fix cdn flush cache API endpoint - @sunny-b
## [v1.4.0] - 2018-08-22
- #175 Add support for Spaces CDN - @sunny-b
## [v1.3.0] - 2018-05-24
- #170 Add support for volume formatting - @adamwg
## [v1.2.0] - 2018-05-08
- #166 Remove support for Go 1.6 - @iheanyi
- #165 Add support for Let's Encrypt Certificates - @viola
## [v1.1.3] - 2018-03-07
- #156 Handle non-json errors from the API - @aknuds1
- #158 Update droplet example to use latest instance type - @dan-v
## [v1.1.2] - 2018-03-06
- #157 storage: list volumes should handle only name or only region params - @andrewsykim
- #154 docs: replace first example with fully-runnable example - @xmudrii
- #152 Handle flags & tag properties of domain record - @jaymecd
## [v1.1.1] - 2017-09-29
- #151 Following user agent field recommendations - @joonas
- #148 AsRequest method to create load balancers requests - @lukegb
## [v1.1.0] - 2017-06-06
### Added
- #145 Add FirewallsService for managing Firewalls with the DigitalOcean API. - @viola
- #139 Add TTL field to the Domains. - @xmudrii
### Fixed
- #143 Fix oauth2.NoContext depreciation. - @jbowens
- #141 Fix DropletActions on tagged resources. - @xmudrii
## [v1.0.0] - 2017-03-10
### Added
- #130 Add Convert to ImageActionsService. - @xmudrii
- #126 Add CertificatesService for managing certificates with the DigitalOcean API. - @viola
- #125 Add LoadBalancersService for managing load balancers with the DigitalOcean API. - @viola
- #122 Add GetVolumeByName to StorageService. - @protochron
- #113 Add context.Context to all calls. - @aybabtme

vendor/github.com/digitalocean/godo/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,54 @@
# Contributing
We love contributions! You are welcome to open a pull request, but it's a good idea to
open an issue and discuss your idea with us first.
Once you are ready to open a PR, please keep the following guidelines in mind:
1. Code should be `go fmt` compliant.
1. Types, structs and funcs should be documented.
1. Tests pass.
## Getting set up
`godo` uses go modules. Just fork this repo, clone your fork and off you go!
## Running tests
When working on code in this repository, tests can be run via:
```sh
go test -mod=vendor .
```
## Versioning
Godo follows [semver](https://www.semver.org) versioning semantics.
New functionality should be accompanied by an increment to the minor
version number. Any code merged to master is subject to release.
## Releasing
Releasing a new version of godo is currently a manual process.
Submit the version change in a pull request separate from the pull
request containing your changes.
1. Update the `CHANGELOG.md` with your changes. If a version header
for the next (unreleased) version does not exist, create one.
Include one bullet point for each piece of new functionality in the
release, including the pull request ID, description, and author(s).
```
## [v1.8.0] - 2019-03-13
- #210 Expose tags on storage volume create/list/get. - @jcodybaker
- #123 Update test dependencies - @digitalocean
```
2. Update the `libraryVersion` number in `godo.go`.
3. Make a pull request with these changes. This PR should be separate from the PR containing the godo changes.
4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new).
5. Update the `Tag version` and `Release title` fields with the new godo version. Be sure the version has a `v` prefix in both places, e.g. `v1.8.0`.
6. Copy the changelog bullet points to the description field.
7. Publish the release.

vendor/github.com/digitalocean/godo/LICENSE.txt generated vendored Normal file

@ -0,0 +1,55 @@
Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
======================
Portions of the client are based on code at:
https://github.com/google/go-github/
Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/digitalocean/godo/README.md generated vendored Normal file

@ -0,0 +1,139 @@
# Godo
[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo)
[![GoDoc](https://godoc.org/github.com/digitalocean/godo?status.svg)](https://godoc.org/github.com/digitalocean/godo)
Godo is a Go client library for accessing the DigitalOcean V2 API.
You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo)
You can view DigitalOcean API docs here: [https://developers.digitalocean.com/documentation/v2/](https://developers.digitalocean.com/documentation/v2/)
## Install
```sh
go get github.com/digitalocean/godo@vX.Y.Z
```
where X.Y.Z is the [version](https://github.com/digitalocean/godo/releases) you need.
or
```sh
go get github.com/digitalocean/godo
```
to fetch the latest version or if you are not using Go modules.
## Usage
```go
import "github.com/digitalocean/godo"
```
Create a new DigitalOcean client, then use the exposed services to
access different parts of the DigitalOcean API.
### Authentication
Currently, a Personal Access Token (PAT) is the only method of
authenticating with the API. You can manage your tokens
at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications).
You can then use your token to create a new client:
```go
package main
import (
"github.com/digitalocean/godo"
)
func main() {
client := godo.NewFromToken("my-digitalocean-api-token")
}
```
If you need to provide a `context.Context` to your new client, you should use [`godo.NewClient`](https://godoc.org/github.com/digitalocean/godo#NewClient) to manually construct a client instead.
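If you construct your own `*http.Client`, the `golang.org/x/oauth2` package is a common way to wire the token in. A minimal sketch (the token string is a placeholder):

```go
package main

import (
	"context"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.TODO()
	// Static token source; the token value is a placeholder.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-digitalocean-api-token"})
	oauthClient := oauth2.NewClient(ctx, ts)
	client := godo.NewClient(oauthClient)
	_ = client // use the client as in the examples below
}
```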
## Examples
To create a new Droplet:
```go
dropletName := "super-cool-droplet"
createRequest := &godo.DropletCreateRequest{
Name: dropletName,
Region: "nyc3",
Size: "s-1vcpu-1gb",
Image: godo.DropletCreateImage{
Slug: "ubuntu-14-04-x64",
},
}
ctx := context.TODO()
newDroplet, _, err := client.Droplets.Create(ctx, createRequest)
if err != nil {
fmt.Printf("Something bad happened: %s\n\n", err)
return err
}
```
### Pagination
If a list of items is paginated by the API, you must request pages individually. For example, to fetch all Droplets:
```go
func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) {
// create a list to hold our droplets
list := []godo.Droplet{}
// create options. initially, these will be blank
opt := &godo.ListOptions{}
for {
droplets, resp, err := client.Droplets.List(ctx, opt)
if err != nil {
return nil, err
}
// append the current page's droplets to our list
for _, d := range droplets {
list = append(list, d)
}
// if we are at the last page, break out of the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
page, err := resp.Links.CurrentPage()
if err != nil {
return nil, err
}
// set the page we want for the next request
opt.Page = page + 1
}
return list, nil
}
```
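A short usage sketch for the helper above, assuming a `client` configured as in the authentication section:

```go
ctx := context.TODO()
droplets, err := DropletList(ctx, client)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("found %d droplets\n", len(droplets))
```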
## Versioning
Each version of the client is tagged and the version is updated accordingly.
To see the list of past versions, run `git tag`.
## Documentation
For a comprehensive list of examples, check out the [API documentation](https://developers.digitalocean.com/documentation/v2/).
For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation.
## Contributing
We love pull requests! Please see the [contribution guidelines](CONTRIBUTING.md).

vendor/github.com/digitalocean/godo/account.go generated vendored Normal file

@ -0,0 +1,60 @@
package godo
import (
"context"
"net/http"
)
// AccountService is an interface for interfacing with the Account
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2/#account
type AccountService interface {
Get(context.Context) (*Account, *Response, error)
}
// AccountServiceOp handles communication with the Account related methods of
// the DigitalOcean API.
type AccountServiceOp struct {
client *Client
}
var _ AccountService = &AccountServiceOp{}
// Account represents a DigitalOcean Account
type Account struct {
DropletLimit int `json:"droplet_limit,omitempty"`
FloatingIPLimit int `json:"floating_ip_limit,omitempty"`
VolumeLimit int `json:"volume_limit,omitempty"`
Email string `json:"email,omitempty"`
UUID string `json:"uuid,omitempty"`
EmailVerified bool `json:"email_verified,omitempty"`
Status string `json:"status,omitempty"`
StatusMessage string `json:"status_message,omitempty"`
}
type accountRoot struct {
Account *Account `json:"account"`
}
func (r Account) String() string {
return Stringify(r)
}
// Get DigitalOcean account info
func (s *AccountServiceOp) Get(ctx context.Context) (*Account, *Response, error) {
path := "v2/account"
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(accountRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Account, resp, err
}
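A usage sketch for this service, assuming a configured `*godo.Client` named `client` (the service hangs off the client's `Account` field):

```go
ctx := context.TODO()
account, _, err := client.Account.Get(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println(account.Email, account.Status)
```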

vendor/github.com/digitalocean/godo/action.go generated vendored Normal file

@ -0,0 +1,108 @@
package godo
import (
"context"
"fmt"
"net/http"
)
const (
actionsBasePath = "v2/actions"
// ActionInProgress is an in progress action status
ActionInProgress = "in-progress"
//ActionCompleted is a completed action status
// ActionCompleted is a completed action status
)
// ActionsService handles communication with action related methods of the
// DigitalOcean API: https://developers.digitalocean.com/documentation/v2#actions
type ActionsService interface {
List(context.Context, *ListOptions) ([]Action, *Response, error)
Get(context.Context, int) (*Action, *Response, error)
}
// ActionsServiceOp handles communication with the action related methods of the
// DigitalOcean API.
type ActionsServiceOp struct {
client *Client
}
var _ ActionsService = &ActionsServiceOp{}
type actionsRoot struct {
Actions []Action `json:"actions"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type actionRoot struct {
Event *Action `json:"action"`
}
// Action represents a DigitalOcean Action
type Action struct {
ID int `json:"id"`
Status string `json:"status"`
Type string `json:"type"`
StartedAt *Timestamp `json:"started_at"`
CompletedAt *Timestamp `json:"completed_at"`
ResourceID int `json:"resource_id"`
ResourceType string `json:"resource_type"`
Region *Region `json:"region,omitempty"`
RegionSlug string `json:"region_slug,omitempty"`
}
// List all actions
func (s *ActionsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Action, *Response, error) {
path := actionsBasePath
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Actions, resp, err
}
// Get an action by ID.
func (s *ActionsServiceOp) Get(ctx context.Context, id int) (*Action, *Response, error) {
if id < 1 {
return nil, nil, NewArgError("id", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d", actionsBasePath, id)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Event, resp, err
}
func (a Action) String() string {
return Stringify(a)
}
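Actions are asynchronous, so callers typically poll `Get` until the status is no longer in progress. A sketch, assuming a configured `client` and an `actionID` from an earlier API call:

```go
ctx := context.TODO()
for {
	action, _, err := client.Actions.Get(ctx, actionID)
	if err != nil {
		log.Fatal(err)
	}
	if action.Status != godo.ActionInProgress {
		fmt.Println("action finished with status:", action.Status)
		break
	}
	time.Sleep(5 * time.Second) // simple fixed delay; tune for real use
}
```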

vendor/github.com/digitalocean/godo/balance.go generated vendored Normal file

@ -0,0 +1,52 @@
package godo
import (
"context"
"net/http"
"time"
)
// BalanceService is an interface for interfacing with the Balance
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2/#balance
type BalanceService interface {
Get(context.Context) (*Balance, *Response, error)
}
// BalanceServiceOp handles communication with the Balance related methods of
// the DigitalOcean API.
type BalanceServiceOp struct {
client *Client
}
var _ BalanceService = &BalanceServiceOp{}
// Balance represents a DigitalOcean Balance
type Balance struct {
MonthToDateBalance string `json:"month_to_date_balance"`
AccountBalance string `json:"account_balance"`
MonthToDateUsage string `json:"month_to_date_usage"`
GeneratedAt time.Time `json:"generated_at"`
}
func (r Balance) String() string {
return Stringify(r)
}
// Get DigitalOcean balance info
func (s *BalanceServiceOp) Get(ctx context.Context) (*Balance, *Response, error) {
path := "v2/customers/my/balance"
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(Balance)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root, resp, err
}

vendor/github.com/digitalocean/godo/billing_history.go generated vendored Normal file

@ -0,0 +1,72 @@
package godo
import (
"context"
"net/http"
"time"
)
const billingHistoryBasePath = "v2/customers/my/billing_history"
// BillingHistoryService is an interface for interfacing with the BillingHistory
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2/#billing_history
type BillingHistoryService interface {
List(context.Context, *ListOptions) (*BillingHistory, *Response, error)
}
// BillingHistoryServiceOp handles communication with the BillingHistory related methods of
// the DigitalOcean API.
type BillingHistoryServiceOp struct {
client *Client
}
var _ BillingHistoryService = &BillingHistoryServiceOp{}
// BillingHistory represents a DigitalOcean Billing History
type BillingHistory struct {
BillingHistory []BillingHistoryEntry `json:"billing_history"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// BillingHistoryEntry represents an entry in a customer's Billing History
type BillingHistoryEntry struct {
Description string `json:"description"`
Amount string `json:"amount"`
InvoiceID *string `json:"invoice_id"`
InvoiceUUID *string `json:"invoice_uuid"`
Date time.Time `json:"date"`
Type string `json:"type"`
}
func (b BillingHistory) String() string {
return Stringify(b)
}
// List the Billing History for a customer
func (s *BillingHistoryServiceOp) List(ctx context.Context, opt *ListOptions) (*BillingHistory, *Response, error) {
path, err := addOptions(billingHistoryBasePath, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(BillingHistory)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root, resp, err
}
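Since the root carries `Links` and `Meta`, billing history pages like other list endpoints. A sketch, assuming a configured `client`:

```go
ctx := context.TODO()
opt := &godo.ListOptions{Page: 1, PerPage: 50}
history, _, err := client.BillingHistory.List(ctx, opt)
if err != nil {
	log.Fatal(err)
}
for _, entry := range history.BillingHistory {
	fmt.Println(entry.Date, entry.Type, entry.Amount)
}
```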

vendor/github.com/digitalocean/godo/cdn.go generated vendored Normal file

@ -0,0 +1,218 @@
package godo
import (
"context"
"fmt"
"net/http"
"time"
)
const cdnBasePath = "v2/cdn/endpoints"
// CDNService is an interface for managing Spaces CDN with the DigitalOcean API.
type CDNService interface {
List(context.Context, *ListOptions) ([]CDN, *Response, error)
Get(context.Context, string) (*CDN, *Response, error)
Create(context.Context, *CDNCreateRequest) (*CDN, *Response, error)
UpdateTTL(context.Context, string, *CDNUpdateTTLRequest) (*CDN, *Response, error)
UpdateCustomDomain(context.Context, string, *CDNUpdateCustomDomainRequest) (*CDN, *Response, error)
FlushCache(context.Context, string, *CDNFlushCacheRequest) (*Response, error)
Delete(context.Context, string) (*Response, error)
}
// CDNServiceOp handles communication with the CDN related methods of the
// DigitalOcean API.
type CDNServiceOp struct {
client *Client
}
var _ CDNService = &CDNServiceOp{}
// CDN represents a DigitalOcean CDN
type CDN struct {
ID string `json:"id"`
Origin string `json:"origin"`
Endpoint string `json:"endpoint"`
CreatedAt time.Time `json:"created_at"`
TTL uint32 `json:"ttl"`
CertificateID string `json:"certificate_id,omitempty"`
CustomDomain string `json:"custom_domain,omitempty"`
}
// cdnRoot represents a response from the DigitalOcean API
type cdnRoot struct {
Endpoint *CDN `json:"endpoint"`
}
type cdnsRoot struct {
Endpoints []CDN `json:"endpoints"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// CDNCreateRequest represents a request to create a CDN.
type CDNCreateRequest struct {
Origin string `json:"origin"`
TTL uint32 `json:"ttl"`
CustomDomain string `json:"custom_domain,omitempty"`
CertificateID string `json:"certificate_id,omitempty"`
}
// CDNUpdateTTLRequest represents a request to update the ttl of a CDN.
type CDNUpdateTTLRequest struct {
TTL uint32 `json:"ttl"`
}
// CDNUpdateCustomDomainRequest represents a request to update the custom domain of a CDN.
type CDNUpdateCustomDomainRequest struct {
CustomDomain string `json:"custom_domain"`
CertificateID string `json:"certificate_id"`
}
// CDNFlushCacheRequest represents a request to flush cache of a CDN.
type CDNFlushCacheRequest struct {
Files []string `json:"files"`
}
// List all CDN endpoints
func (c CDNServiceOp) List(ctx context.Context, opt *ListOptions) ([]CDN, *Response, error) {
path, err := addOptions(cdnBasePath, opt)
if err != nil {
return nil, nil, err
}
req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(cdnsRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Endpoints, resp, err
}
// Get individual CDN. It requires a non-empty cdn id.
func (c CDNServiceOp) Get(ctx context.Context, id string) (*CDN, *Response, error) {
if len(id) == 0 {
return nil, nil, NewArgError("id", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s", cdnBasePath, id)
req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(cdnRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Endpoint, resp, err
}
// Create a new CDN
func (c CDNServiceOp) Create(ctx context.Context, createRequest *CDNCreateRequest) (*CDN, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
req, err := c.client.NewRequest(ctx, http.MethodPost, cdnBasePath, createRequest)
if err != nil {
return nil, nil, err
}
root := new(cdnRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Endpoint, resp, err
}
// UpdateTTL updates the ttl of an individual CDN
func (c CDNServiceOp) UpdateTTL(ctx context.Context, id string, updateRequest *CDNUpdateTTLRequest) (*CDN, *Response, error) {
return c.update(ctx, id, updateRequest)
}
// UpdateCustomDomain sets or removes the custom domain of an individual CDN
func (c CDNServiceOp) UpdateCustomDomain(ctx context.Context, id string, updateRequest *CDNUpdateCustomDomainRequest) (*CDN, *Response, error) {
return c.update(ctx, id, updateRequest)
}
func (c CDNServiceOp) update(ctx context.Context, id string, updateRequest interface{}) (*CDN, *Response, error) {
if updateRequest == nil {
return nil, nil, NewArgError("updateRequest", "cannot be nil")
}
if len(id) == 0 {
return nil, nil, NewArgError("id", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s", cdnBasePath, id)
req, err := c.client.NewRequest(ctx, http.MethodPut, path, updateRequest)
if err != nil {
return nil, nil, err
}
root := new(cdnRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Endpoint, resp, err
}
// FlushCache flushes the cache of an individual CDN. Requires a non-empty slice of file paths and/or wildcards
func (c CDNServiceOp) FlushCache(ctx context.Context, id string, flushCacheRequest *CDNFlushCacheRequest) (*Response, error) {
if flushCacheRequest == nil {
return nil, NewArgError("flushCacheRequest", "cannot be nil")
}
if len(id) == 0 {
return nil, NewArgError("id", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s/cache", cdnBasePath, id)
req, err := c.client.NewRequest(ctx, http.MethodDelete, path, flushCacheRequest)
if err != nil {
return nil, err
}
resp, err := c.client.Do(ctx, req, nil)
return resp, err
}
// Delete an individual CDN
func (c CDNServiceOp) Delete(ctx context.Context, id string) (*Response, error) {
if len(id) == 0 {
return nil, NewArgError("id", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s", cdnBasePath, id)
req, err := c.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := c.client.Do(ctx, req, nil)
return resp, err
}
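A sketch tying the endpoints together: create a CDN endpoint for a Space, then flush its cache (the origin is a placeholder; assumes a configured `client`):

```go
ctx := context.TODO()
cdn, _, err := client.CDNs.Create(ctx, &godo.CDNCreateRequest{
	Origin: "my-space.nyc3.digitaloceanspaces.com", // placeholder Spaces origin
	TTL:    3600,
})
if err != nil {
	log.Fatal(err)
}
// "*" flushes everything; specific paths and wildcards also work.
if _, err := client.CDNs.FlushCache(ctx, cdn.ID, &godo.CDNFlushCacheRequest{Files: []string{"*"}}); err != nil {
	log.Fatal(err)
}
```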

vendor/github.com/digitalocean/godo/certificates.go generated vendored Normal file

@ -0,0 +1,130 @@
package godo
import (
"context"
"net/http"
"path"
)
const certificatesBasePath = "/v2/certificates"
// CertificatesService is an interface for managing certificates with the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2/#certificates
type CertificatesService interface {
Get(context.Context, string) (*Certificate, *Response, error)
List(context.Context, *ListOptions) ([]Certificate, *Response, error)
Create(context.Context, *CertificateRequest) (*Certificate, *Response, error)
Delete(context.Context, string) (*Response, error)
}
// Certificate represents a DigitalOcean certificate configuration.
type Certificate struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
DNSNames []string `json:"dns_names,omitempty"`
NotAfter string `json:"not_after,omitempty"`
SHA1Fingerprint string `json:"sha1_fingerprint,omitempty"`
Created string `json:"created_at,omitempty"`
State string `json:"state,omitempty"`
Type string `json:"type,omitempty"`
}
// CertificateRequest represents configuration for a new certificate.
type CertificateRequest struct {
Name string `json:"name,omitempty"`
DNSNames []string `json:"dns_names,omitempty"`
PrivateKey string `json:"private_key,omitempty"`
LeafCertificate string `json:"leaf_certificate,omitempty"`
CertificateChain string `json:"certificate_chain,omitempty"`
Type string `json:"type,omitempty"`
}
type certificateRoot struct {
Certificate *Certificate `json:"certificate"`
}
type certificatesRoot struct {
Certificates []Certificate `json:"certificates"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// CertificatesServiceOp handles communication with certificates methods of the DigitalOcean API.
type CertificatesServiceOp struct {
client *Client
}
var _ CertificatesService = &CertificatesServiceOp{}
// Get an existing certificate by its identifier.
func (c *CertificatesServiceOp) Get(ctx context.Context, cID string) (*Certificate, *Response, error) {
urlStr := path.Join(certificatesBasePath, cID)
req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil)
if err != nil {
return nil, nil, err
}
root := new(certificateRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Certificate, resp, nil
}
// List all certificates.
func (c *CertificatesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Certificate, *Response, error) {
urlStr, err := addOptions(certificatesBasePath, opt)
if err != nil {
return nil, nil, err
}
req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil)
if err != nil {
return nil, nil, err
}
root := new(certificatesRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Certificates, resp, nil
}
// Create a new certificate with provided configuration.
func (c *CertificatesServiceOp) Create(ctx context.Context, cr *CertificateRequest) (*Certificate, *Response, error) {
req, err := c.client.NewRequest(ctx, http.MethodPost, certificatesBasePath, cr)
if err != nil {
return nil, nil, err
}
root := new(certificateRoot)
resp, err := c.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Certificate, resp, nil
}
// Delete a certificate by its identifier.
func (c *CertificatesServiceOp) Delete(ctx context.Context, cID string) (*Response, error) {
urlStr := path.Join(certificatesBasePath, cID)
req, err := c.client.NewRequest(ctx, http.MethodDelete, urlStr, nil)
if err != nil {
return nil, err
}
return c.client.Do(ctx, req, nil)
}
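A sketch of requesting a managed Let's Encrypt certificate (the `lets_encrypt` type value follows the DigitalOcean API docs; the name and domains are placeholders; assumes a configured `client`):

```go
ctx := context.TODO()
cert, _, err := client.Certificates.Create(ctx, &godo.CertificateRequest{
	Name:     "web-cert-01", // placeholder name
	Type:     "lets_encrypt",
	DNSNames: []string{"example.com", "www.example.com"},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(cert.ID, cert.State)
```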

vendor/github.com/digitalocean/godo/databases.go generated vendored Normal file

@ -0,0 +1,845 @@
package godo
import (
"context"
"fmt"
"net/http"
"strings"
"time"
)
const (
databaseBasePath = "/v2/databases"
databaseSinglePath = databaseBasePath + "/%s"
databaseResizePath = databaseBasePath + "/%s/resize"
databaseMigratePath = databaseBasePath + "/%s/migrate"
databaseMaintenancePath = databaseBasePath + "/%s/maintenance"
databaseBackupsPath = databaseBasePath + "/%s/backups"
databaseUsersPath = databaseBasePath + "/%s/users"
databaseUserPath = databaseBasePath + "/%s/users/%s"
databaseResetUserAuthPath = databaseUserPath + "/reset_auth"
databaseDBPath = databaseBasePath + "/%s/dbs/%s"
databaseDBsPath = databaseBasePath + "/%s/dbs"
databasePoolPath = databaseBasePath + "/%s/pools/%s"
databasePoolsPath = databaseBasePath + "/%s/pools"
databaseReplicaPath = databaseBasePath + "/%s/replicas/%s"
databaseReplicasPath = databaseBasePath + "/%s/replicas"
databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy"
databaseSQLModePath = databaseBasePath + "/%s/sql_mode"
databaseFirewallRulesPath = databaseBasePath + "/%s/firewall"
)
// SQL Mode constants allow for MySQL-specific SQL flavor configuration.
const (
SQLModeAllowInvalidDates = "ALLOW_INVALID_DATES"
SQLModeANSIQuotes = "ANSI_QUOTES"
SQLModeHighNotPrecedence = "HIGH_NOT_PRECEDENCE"
SQLModeIgnoreSpace = "IGNORE_SPACE"
SQLModeNoAuthCreateUser = "NO_AUTO_CREATE_USER"
SQLModeNoAutoValueOnZero = "NO_AUTO_VALUE_ON_ZERO"
SQLModeNoBackslashEscapes = "NO_BACKSLASH_ESCAPES"
SQLModeNoDirInCreate = "NO_DIR_IN_CREATE"
SQLModeNoEngineSubstitution = "NO_ENGINE_SUBSTITUTION"
SQLModeNoFieldOptions = "NO_FIELD_OPTIONS"
SQLModeNoKeyOptions = "NO_KEY_OPTIONS"
SQLModeNoTableOptions = "NO_TABLE_OPTIONS"
SQLModeNoUnsignedSubtraction = "NO_UNSIGNED_SUBTRACTION"
SQLModeNoZeroDate = "NO_ZERO_DATE"
SQLModeNoZeroInDate = "NO_ZERO_IN_DATE"
SQLModeOnlyFullGroupBy = "ONLY_FULL_GROUP_BY"
SQLModePadCharToFullLength = "PAD_CHAR_TO_FULL_LENGTH"
SQLModePipesAsConcat = "PIPES_AS_CONCAT"
SQLModeRealAsFloat = "REAL_AS_FLOAT"
SQLModeStrictAllTables = "STRICT_ALL_TABLES"
SQLModeStrictTransTables = "STRICT_TRANS_TABLES"
SQLModeANSI = "ANSI"
SQLModeDB2 = "DB2"
SQLModeMaxDB = "MAXDB"
SQLModeMSSQL = "MSSQL"
SQLModeMYSQL323 = "MYSQL323"
SQLModeMYSQL40 = "MYSQL40"
SQLModeOracle = "ORACLE"
SQLModePostgreSQL = "POSTGRESQL"
SQLModeTraditional = "TRADITIONAL"
)
// SQL Auth constants allow for MySQL-specific user auth plugins
const (
SQLAuthPluginNative = "mysql_native_password"
SQLAuthPluginCachingSHA2 = "caching_sha2_password"
)
// Redis eviction policies supported by the managed Redis product.
const (
EvictionPolicyNoEviction = "noeviction"
EvictionPolicyAllKeysLRU = "allkeys_lru"
EvictionPolicyAllKeysRandom = "allkeys_random"
EvictionPolicyVolatileLRU = "volatile_lru"
EvictionPolicyVolatileRandom = "volatile_random"
EvictionPolicyVolatileTTL = "volatile_ttl"
)
// The DatabasesService provides access to the DigitalOcean managed database
// suite of products through the public API. Customers can create new database
// clusters, migrate them between regions, create replicas and interact with
// their configurations. Each database service is referred to as a Database. A
// SQL database service can have multiple databases residing in the system. To
// help make these entities distinct from Databases in godo, we refer to them
// here as DatabaseDBs.
//
// See: https://developers.digitalocean.com/documentation/v2#databases
type DatabasesService interface {
List(context.Context, *ListOptions) ([]Database, *Response, error)
Get(context.Context, string) (*Database, *Response, error)
Create(context.Context, *DatabaseCreateRequest) (*Database, *Response, error)
Delete(context.Context, string) (*Response, error)
Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error)
Migrate(context.Context, string, *DatabaseMigrateRequest) (*Response, error)
UpdateMaintenance(context.Context, string, *DatabaseUpdateMaintenanceRequest) (*Response, error)
ListBackups(context.Context, string, *ListOptions) ([]DatabaseBackup, *Response, error)
GetUser(context.Context, string, string) (*DatabaseUser, *Response, error)
ListUsers(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error)
CreateUser(context.Context, string, *DatabaseCreateUserRequest) (*DatabaseUser, *Response, error)
DeleteUser(context.Context, string, string) (*Response, error)
ResetUserAuth(context.Context, string, string, *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error)
ListDBs(context.Context, string, *ListOptions) ([]DatabaseDB, *Response, error)
CreateDB(context.Context, string, *DatabaseCreateDBRequest) (*DatabaseDB, *Response, error)
GetDB(context.Context, string, string) (*DatabaseDB, *Response, error)
DeleteDB(context.Context, string, string) (*Response, error)
ListPools(context.Context, string, *ListOptions) ([]DatabasePool, *Response, error)
CreatePool(context.Context, string, *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error)
GetPool(context.Context, string, string) (*DatabasePool, *Response, error)
DeletePool(context.Context, string, string) (*Response, error)
GetReplica(context.Context, string, string) (*DatabaseReplica, *Response, error)
ListReplicas(context.Context, string, *ListOptions) ([]DatabaseReplica, *Response, error)
CreateReplica(context.Context, string, *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error)
DeleteReplica(context.Context, string, string) (*Response, error)
GetEvictionPolicy(context.Context, string) (string, *Response, error)
SetEvictionPolicy(context.Context, string, string) (*Response, error)
GetSQLMode(context.Context, string) (string, *Response, error)
SetSQLMode(context.Context, string, ...string) (*Response, error)
GetFirewallRules(context.Context, string) ([]DatabaseFirewallRule, *Response, error)
UpdateFirewallRules(context.Context, string, *DatabaseUpdateFirewallRulesRequest) (*Response, error)
}
// DatabasesServiceOp handles communication with the Databases related methods
// of the DigitalOcean API.
type DatabasesServiceOp struct {
client *Client
}
var _ DatabasesService = &DatabasesServiceOp{}
// Database represents a DigitalOcean managed database product. These managed databases
// usually consist of a cluster of database nodes: a primary and 0 or more replicas.
// The EngineSlug is a string which indicates the type of database service. Some examples are
// "pg", "mysql" or "redis". A Database also includes connection information and other
// properties of the service like region, size and current status.
type Database struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
EngineSlug string `json:"engine,omitempty"`
VersionSlug string `json:"version,omitempty"`
Connection *DatabaseConnection `json:"connection,omitempty"`
PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"`
Users []DatabaseUser `json:"users,omitempty"`
NumNodes int `json:"num_nodes,omitempty"`
SizeSlug string `json:"size,omitempty"`
DBNames []string `json:"db_names,omitempty"`
RegionSlug string `json:"region,omitempty"`
Status string `json:"status,omitempty"`
MaintenanceWindow *DatabaseMaintenanceWindow `json:"maintenance_window,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
PrivateNetworkUUID string `json:"private_network_uuid,omitempty"`
Tags []string `json:"tags,omitempty"`
}
// DatabaseConnection represents a database connection
type DatabaseConnection struct {
URI string `json:"uri,omitempty"`
Database string `json:"database,omitempty"`
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
User string `json:"user,omitempty"`
Password string `json:"password,omitempty"`
SSL bool `json:"ssl,omitempty"`
}
// DatabaseUser represents a user in the database
type DatabaseUser struct {
Name string `json:"name,omitempty"`
Role string `json:"role,omitempty"`
Password string `json:"password,omitempty"`
MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
}
// DatabaseMySQLUserSettings contains MySQL-specific user settings
type DatabaseMySQLUserSettings struct {
AuthPlugin string `json:"auth_plugin"`
}
// DatabaseMaintenanceWindow represents the maintenance_window of a database
// cluster
type DatabaseMaintenanceWindow struct {
Day string `json:"day,omitempty"`
Hour string `json:"hour,omitempty"`
Pending bool `json:"pending,omitempty"`
Description []string `json:"description,omitempty"`
}
// DatabaseBackup represents a database backup.
type DatabaseBackup struct {
CreatedAt time.Time `json:"created_at,omitempty"`
SizeGigabytes float64 `json:"size_gigabytes,omitempty"`
}
// DatabaseCreateRequest represents a request to create a database cluster
type DatabaseCreateRequest struct {
Name string `json:"name,omitempty"`
EngineSlug string `json:"engine,omitempty"`
Version string `json:"version,omitempty"`
SizeSlug string `json:"size,omitempty"`
Region string `json:"region,omitempty"`
NumNodes int `json:"num_nodes,omitempty"`
PrivateNetworkUUID string `json:"private_network_uuid"`
Tags []string `json:"tags,omitempty"`
}
// DatabaseResizeRequest can be used to initiate a database resize operation.
type DatabaseResizeRequest struct {
SizeSlug string `json:"size,omitempty"`
NumNodes int `json:"num_nodes,omitempty"`
}
// DatabaseMigrateRequest can be used to initiate a database migrate operation.
type DatabaseMigrateRequest struct {
Region string `json:"region,omitempty"`
PrivateNetworkUUID string `json:"private_network_uuid"`
}
// DatabaseUpdateMaintenanceRequest can be used to update the database's maintenance window.
type DatabaseUpdateMaintenanceRequest struct {
Day string `json:"day,omitempty"`
Hour string `json:"hour,omitempty"`
}
// DatabaseDB represents an engine-specific database created within a database cluster. For SQL
// databases like PostgreSQL or MySQL, a "DB" refers to a database created on the RDBMS. For instance,
// a PostgreSQL database server can contain many database schemas, each with its own settings, access
// permissions and data. ListDBs will return all databases present on the server.
type DatabaseDB struct {
Name string `json:"name"`
}
// DatabaseReplica represents a read-only replica of a particular database
type DatabaseReplica struct {
Name string `json:"name"`
Connection *DatabaseConnection `json:"connection"`
PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"`
Region string `json:"region"`
Status string `json:"status"`
CreatedAt time.Time `json:"created_at"`
PrivateNetworkUUID string `json:"private_network_uuid,omitempty"`
Tags []string `json:"tags,omitempty"`
}
// DatabasePool represents a database connection pool
type DatabasePool struct {
User string `json:"user"`
Name string `json:"name"`
Size int `json:"size"`
Database string `json:"db"`
Mode string `json:"mode"`
Connection *DatabaseConnection `json:"connection"`
PrivateConnection *DatabaseConnection `json:"private_connection,omitempty"`
}
// DatabaseCreatePoolRequest is used to create a new database connection pool
type DatabaseCreatePoolRequest struct {
User string `json:"user"`
Name string `json:"name"`
Size int `json:"size"`
Database string `json:"db"`
Mode string `json:"mode"`
}
// DatabaseCreateUserRequest is used to create a new database user
type DatabaseCreateUserRequest struct {
Name string `json:"name"`
MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
}
// DatabaseResetUserAuthRequest is used to reset a user's DB auth
type DatabaseResetUserAuthRequest struct {
MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
}
// DatabaseCreateDBRequest is used to create a new engine-specific database within the cluster
type DatabaseCreateDBRequest struct {
Name string `json:"name"`
}
// DatabaseCreateReplicaRequest is used to create a new read-only replica
type DatabaseCreateReplicaRequest struct {
Name string `json:"name"`
Region string `json:"region"`
Size string `json:"size"`
PrivateNetworkUUID string `json:"private_network_uuid"`
Tags []string `json:"tags,omitempty"`
}
// DatabaseUpdateFirewallRulesRequest is used to set the firewall rules for a database
type DatabaseUpdateFirewallRulesRequest struct {
Rules []*DatabaseFirewallRule `json:"rules"`
}
// DatabaseFirewallRule is a rule describing an inbound source to a database
type DatabaseFirewallRule struct {
UUID string `json:"uuid"`
ClusterUUID string `json:"cluster_uuid"`
Type string `json:"type"`
Value string `json:"value"`
CreatedAt time.Time `json:"created_at"`
}
type databaseUserRoot struct {
User *DatabaseUser `json:"user"`
}
type databaseUsersRoot struct {
Users []DatabaseUser `json:"users"`
}
type databaseDBRoot struct {
DB *DatabaseDB `json:"db"`
}
type databaseDBsRoot struct {
DBs []DatabaseDB `json:"dbs"`
}
type databasesRoot struct {
Databases []Database `json:"databases"`
}
type databaseRoot struct {
Database *Database `json:"database"`
}
type databaseBackupsRoot struct {
Backups []DatabaseBackup `json:"backups"`
}
type databasePoolRoot struct {
Pool *DatabasePool `json:"pool"`
}
type databasePoolsRoot struct {
Pools []DatabasePool `json:"pools"`
}
type databaseReplicaRoot struct {
Replica *DatabaseReplica `json:"replica"`
}
type databaseReplicasRoot struct {
Replicas []DatabaseReplica `json:"replicas"`
}
type evictionPolicyRoot struct {
EvictionPolicy string `json:"eviction_policy"`
}
type sqlModeRoot struct {
SQLMode string `json:"sql_mode"`
}
type databaseFirewallRuleRoot struct {
Rules []DatabaseFirewallRule `json:"rules"`
}
// URN returns a URN identifier for the database
func (d Database) URN() string {
return ToURN("dbaas", d.ID)
}
// List returns a list of the Databases visible with the caller's API token
func (svc *DatabasesServiceOp) List(ctx context.Context, opts *ListOptions) ([]Database, *Response, error) {
path := databaseBasePath
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databasesRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Databases, resp, nil
}
// Get retrieves the details of a database cluster
func (svc *DatabasesServiceOp) Get(ctx context.Context, databaseID string) (*Database, *Response, error) {
path := fmt.Sprintf(databaseSinglePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Database, resp, nil
}
// Create creates a database cluster
func (svc *DatabasesServiceOp) Create(ctx context.Context, create *DatabaseCreateRequest) (*Database, *Response, error) {
path := databaseBasePath
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create)
if err != nil {
return nil, nil, err
}
root := new(databaseRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Database, resp, nil
}
// Delete deletes a database cluster. There is no way to recover a cluster once
// it has been destroyed.
func (svc *DatabasesServiceOp) Delete(ctx context.Context, databaseID string) (*Response, error) {
path := fmt.Sprintf("%s/%s", databaseBasePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// Resize resizes a database cluster by number of nodes or size
func (svc *DatabasesServiceOp) Resize(ctx context.Context, databaseID string, resize *DatabaseResizeRequest) (*Response, error) {
path := fmt.Sprintf(databaseResizePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, resize)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// Migrate migrates a database cluster to a new region
func (svc *DatabasesServiceOp) Migrate(ctx context.Context, databaseID string, migrate *DatabaseMigrateRequest) (*Response, error) {
path := fmt.Sprintf(databaseMigratePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, migrate)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// UpdateMaintenance updates the maintenance window on a cluster
func (svc *DatabasesServiceOp) UpdateMaintenance(ctx context.Context, databaseID string, maintenance *DatabaseUpdateMaintenanceRequest) (*Response, error) {
path := fmt.Sprintf(databaseMaintenancePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, maintenance)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// ListBackups returns a list of the current backups of a database
func (svc *DatabasesServiceOp) ListBackups(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseBackup, *Response, error) {
path := fmt.Sprintf(databaseBackupsPath, databaseID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseBackupsRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Backups, resp, nil
}
// GetUser returns the database user identified by userID
func (svc *DatabasesServiceOp) GetUser(ctx context.Context, databaseID, userID string) (*DatabaseUser, *Response, error) {
path := fmt.Sprintf(databaseUserPath, databaseID, userID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseUserRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.User, resp, nil
}
// ListUsers returns all database users for the database
func (svc *DatabasesServiceOp) ListUsers(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseUser, *Response, error) {
path := fmt.Sprintf(databaseUsersPath, databaseID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseUsersRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Users, resp, nil
}
// CreateUser will create a new database user
func (svc *DatabasesServiceOp) CreateUser(ctx context.Context, databaseID string, createUser *DatabaseCreateUserRequest) (*DatabaseUser, *Response, error) {
path := fmt.Sprintf(databaseUsersPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createUser)
if err != nil {
return nil, nil, err
}
root := new(databaseUserRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.User, resp, nil
}
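// ResetUserAuth will reset user authentication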
func (svc *DatabasesServiceOp) ResetUserAuth(ctx context.Context, databaseID, userID string, resetAuth *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error) {
path := fmt.Sprintf(databaseResetUserAuthPath, databaseID, userID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, resetAuth)
if err != nil {
return nil, nil, err
}
root := new(databaseUserRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.User, resp, nil
}
// DeleteUser will delete an existing database user
func (svc *DatabasesServiceOp) DeleteUser(ctx context.Context, databaseID, userID string) (*Response, error) {
path := fmt.Sprintf(databaseUserPath, databaseID, userID)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// ListDBs returns all databases for a given database cluster
func (svc *DatabasesServiceOp) ListDBs(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseDB, *Response, error) {
path := fmt.Sprintf(databaseDBsPath, databaseID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseDBsRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.DBs, resp, nil
}
// GetDB returns a single database by name
func (svc *DatabasesServiceOp) GetDB(ctx context.Context, databaseID, name string) (*DatabaseDB, *Response, error) {
path := fmt.Sprintf(databaseDBPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseDBRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.DB, resp, nil
}
// CreateDB will create a new database
func (svc *DatabasesServiceOp) CreateDB(ctx context.Context, databaseID string, createDB *DatabaseCreateDBRequest) (*DatabaseDB, *Response, error) {
path := fmt.Sprintf(databaseDBsPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createDB)
if err != nil {
return nil, nil, err
}
root := new(databaseDBRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.DB, resp, nil
}
// DeleteDB will delete an existing database
func (svc *DatabasesServiceOp) DeleteDB(ctx context.Context, databaseID, name string) (*Response, error) {
path := fmt.Sprintf(databaseDBPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// ListPools returns all connection pools for a given database cluster
func (svc *DatabasesServiceOp) ListPools(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabasePool, *Response, error) {
path := fmt.Sprintf(databasePoolsPath, databaseID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databasePoolsRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Pools, resp, nil
}
// GetPool returns a single database connection pool by name
func (svc *DatabasesServiceOp) GetPool(ctx context.Context, databaseID, name string) (*DatabasePool, *Response, error) {
path := fmt.Sprintf(databasePoolPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databasePoolRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Pool, resp, nil
}
// CreatePool will create a new database connection pool
func (svc *DatabasesServiceOp) CreatePool(ctx context.Context, databaseID string, createPool *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error) {
path := fmt.Sprintf(databasePoolsPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createPool)
if err != nil {
return nil, nil, err
}
root := new(databasePoolRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Pool, resp, nil
}
// DeletePool will delete an existing database connection pool
func (svc *DatabasesServiceOp) DeletePool(ctx context.Context, databaseID, name string) (*Response, error) {
path := fmt.Sprintf(databasePoolPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// GetReplica returns a single database replica
func (svc *DatabasesServiceOp) GetReplica(ctx context.Context, databaseID, name string) (*DatabaseReplica, *Response, error) {
path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseReplicaRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Replica, resp, nil
}
// ListReplicas returns all read-only replicas for a given database cluster
func (svc *DatabasesServiceOp) ListReplicas(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseReplica, *Response, error) {
path := fmt.Sprintf(databaseReplicasPath, databaseID)
path, err := addOptions(path, opts)
if err != nil {
return nil, nil, err
}
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(databaseReplicasRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Replicas, resp, nil
}
// CreateReplica will create a new read-only replica
func (svc *DatabasesServiceOp) CreateReplica(ctx context.Context, databaseID string, createReplica *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error) {
path := fmt.Sprintf(databaseReplicasPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createReplica)
if err != nil {
return nil, nil, err
}
root := new(databaseReplicaRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Replica, resp, nil
}
// DeleteReplica will delete an existing database replica
func (svc *DatabasesServiceOp) DeleteReplica(ctx context.Context, databaseID, name string) (*Response, error) {
path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// GetEvictionPolicy loads the eviction policy for a given Redis cluster.
func (svc *DatabasesServiceOp) GetEvictionPolicy(ctx context.Context, databaseID string) (string, *Response, error) {
path := fmt.Sprintf(databaseEvictionPolicyPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return "", nil, err
}
root := new(evictionPolicyRoot)
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return "", resp, err
}
return root.EvictionPolicy, resp, nil
}
// SetEvictionPolicy updates the eviction policy for a given Redis cluster.
//
// The valid eviction policies are documented by the exported string constants
// with the prefix `EvictionPolicy`.
func (svc *DatabasesServiceOp) SetEvictionPolicy(ctx context.Context, databaseID, policy string) (*Response, error) {
path := fmt.Sprintf(databaseEvictionPolicyPath, databaseID)
root := &evictionPolicyRoot{EvictionPolicy: policy}
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, root)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// GetSQLMode loads the SQL Mode settings for a given MySQL cluster.
func (svc *DatabasesServiceOp) GetSQLMode(ctx context.Context, databaseID string) (string, *Response, error) {
path := fmt.Sprintf(databaseSQLModePath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return "", nil, err
}
root := &sqlModeRoot{}
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return "", resp, err
}
return root.SQLMode, resp, nil
}
// SetSQLMode updates the SQL Mode settings for a given MySQL cluster.
func (svc *DatabasesServiceOp) SetSQLMode(ctx context.Context, databaseID string, sqlModes ...string) (*Response, error) {
path := fmt.Sprintf(databaseSQLModePath, databaseID)
root := &sqlModeRoot{SQLMode: strings.Join(sqlModes, ",")}
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, root)
if err != nil {
return nil, err
}
resp, err := svc.client.Do(ctx, req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// GetFirewallRules loads the inbound sources for a given cluster.
func (svc *DatabasesServiceOp) GetFirewallRules(ctx context.Context, databaseID string) ([]DatabaseFirewallRule, *Response, error) {
path := fmt.Sprintf(databaseFirewallRulesPath, databaseID)
root := new(databaseFirewallRuleRoot)
req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
resp, err := svc.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Rules, resp, nil
}
// UpdateFirewallRules sets the inbound sources for a given cluster.
func (svc *DatabasesServiceOp) UpdateFirewallRules(ctx context.Context, databaseID string, firewallRulesReq *DatabaseUpdateFirewallRulesRequest) (*Response, error) {
path := fmt.Sprintf(databaseFirewallRulesPath, databaseID)
req, err := svc.client.NewRequest(ctx, http.MethodPut, path, firewallRulesReq)
if err != nil {
return nil, err
}
return svc.client.Do(ctx, req, nil)
}
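A sketch of a typical flow against this service: create a PostgreSQL cluster, then restrict inbound traffic with a firewall rule (all slugs and the IP are placeholders; assumes a configured `client`):

```go
ctx := context.TODO()
db, _, err := client.Databases.Create(ctx, &godo.DatabaseCreateRequest{
	Name:       "example-cluster", // placeholder name
	EngineSlug: "pg",
	Version:    "11",
	Region:     "nyc1",
	SizeSlug:   "db-s-1vcpu-1gb",
	NumNodes:   1,
})
if err != nil {
	log.Fatal(err)
}
// Allow only a single source IP to reach the cluster.
_, err = client.Databases.UpdateFirewallRules(ctx, db.ID, &godo.DatabaseUpdateFirewallRulesRequest{
	Rules: []*godo.DatabaseFirewallRule{{Type: "ip_addr", Value: "192.0.2.10"}},
})
if err != nil {
	log.Fatal(err)
}
// For a MySQL cluster (mysqlID is a placeholder), SetSQLMode joins the
// variadic modes into one comma-separated string:
//   _, err = client.Databases.SetSQLMode(ctx, mysqlID, godo.SQLModeANSI, godo.SQLModeStrictAllTables)
```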

vendor/github.com/digitalocean/godo/doc.go generated vendored Normal file

@ -0,0 +1,2 @@
// Package godo is the DigitalOcean API v2 client for Go.
package godo

vendor/github.com/digitalocean/godo/domains.go generated vendored Normal file

@ -0,0 +1,341 @@
package godo
import (
"context"
"fmt"
"net/http"
)
const domainsBasePath = "v2/domains"
// DomainsService is an interface for managing DNS with the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2#domains and
// https://developers.digitalocean.com/documentation/v2#domain-records
type DomainsService interface {
List(context.Context, *ListOptions) ([]Domain, *Response, error)
Get(context.Context, string) (*Domain, *Response, error)
Create(context.Context, *DomainCreateRequest) (*Domain, *Response, error)
Delete(context.Context, string) (*Response, error)
Records(context.Context, string, *ListOptions) ([]DomainRecord, *Response, error)
Record(context.Context, string, int) (*DomainRecord, *Response, error)
DeleteRecord(context.Context, string, int) (*Response, error)
EditRecord(context.Context, string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error)
CreateRecord(context.Context, string, *DomainRecordEditRequest) (*DomainRecord, *Response, error)
}
// DomainsServiceOp handles communication with the domain related methods of the
// DigitalOcean API.
type DomainsServiceOp struct {
client *Client
}
var _ DomainsService = &DomainsServiceOp{}
// Domain represents a DigitalOcean domain
type Domain struct {
Name string `json:"name"`
TTL int `json:"ttl"`
ZoneFile string `json:"zone_file"`
}
// domainRoot represents a response from the DigitalOcean API
type domainRoot struct {
Domain *Domain `json:"domain"`
}
type domainsRoot struct {
Domains []Domain `json:"domains"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// DomainCreateRequest represents a request to create a domain.
type DomainCreateRequest struct {
Name string `json:"name"`
IPAddress string `json:"ip_address,omitempty"`
}
// domainRecordRoot is the root of an individual Domain Record response
type domainRecordRoot struct {
DomainRecord *DomainRecord `json:"domain_record"`
}
// domainRecordsRoot is the root of a group of Domain Record responses
type domainRecordsRoot struct {
DomainRecords []DomainRecord `json:"domain_records"`
Links *Links `json:"links"`
}
// DomainRecord represents a DigitalOcean DomainRecord
type DomainRecord struct {
ID int `json:"id,float64,omitempty"`
Type string `json:"type,omitempty"`
Name string `json:"name,omitempty"`
Data string `json:"data,omitempty"`
Priority int `json:"priority"`
Port int `json:"port,omitempty"`
TTL int `json:"ttl,omitempty"`
Weight int `json:"weight"`
Flags int `json:"flags"`
Tag string `json:"tag,omitempty"`
}
// DomainRecordEditRequest represents a request to update a domain record.
type DomainRecordEditRequest struct {
Type string `json:"type,omitempty"`
Name string `json:"name,omitempty"`
Data string `json:"data,omitempty"`
Priority int `json:"priority"`
Port int `json:"port,omitempty"`
TTL int `json:"ttl,omitempty"`
Weight int `json:"weight"`
Flags int `json:"flags"`
Tag string `json:"tag,omitempty"`
}
func (d Domain) String() string {
return Stringify(d)
}
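// URN returns the domain name in DigitalOcean URN form (via ToURN, e.g. do:domain:example.com).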
func (d Domain) URN() string {
return ToURN("Domain", d.Name)
}
// List all domains.
func (s *DomainsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Domain, *Response, error) {
path := domainsBasePath
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(domainsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Domains, resp, err
}
// Get individual domain. It requires a non-empty domain name.
func (s *DomainsServiceOp) Get(ctx context.Context, name string) (*Domain, *Response, error) {
if len(name) < 1 {
return nil, nil, NewArgError("name", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s", domainsBasePath, name)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(domainRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Domain, resp, err
}
// Create a new domain
func (s *DomainsServiceOp) Create(ctx context.Context, createRequest *DomainCreateRequest) (*Domain, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := domainsBasePath
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(domainRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Domain, resp, err
}
// Delete domain
func (s *DomainsServiceOp) Delete(ctx context.Context, name string) (*Response, error) {
if len(name) < 1 {
return nil, NewArgError("name", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s", domainsBasePath, name)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
// String converts a DomainRecord to a string.
func (d DomainRecord) String() string {
return Stringify(d)
}
// String converts a DomainRecordEditRequest to a string.
func (d DomainRecordEditRequest) String() string {
return Stringify(d)
}
// Records returns a slice of DomainRecords for a domain
func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *ListOptions) ([]DomainRecord, *Response, error) {
if len(domain) < 1 {
return nil, nil, NewArgError("domain", "cannot be an empty string")
}
path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(domainRecordsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.DomainRecords, resp, err
}
// Record returns a single DNS record for a domain by record id
func (s *DomainsServiceOp) Record(ctx context.Context, domain string, id int) (*DomainRecord, *Response, error) {
if len(domain) < 1 {
return nil, nil, NewArgError("domain", "cannot be an empty string")
}
if id < 1 {
return nil, nil, NewArgError("id", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
record := new(domainRecordRoot)
resp, err := s.client.Do(ctx, req, record)
if err != nil {
return nil, resp, err
}
return record.DomainRecord, resp, err
}
// DeleteRecord deletes a record from a domain identified by id
func (s *DomainsServiceOp) DeleteRecord(ctx context.Context, domain string, id int) (*Response, error) {
if len(domain) < 1 {
return nil, NewArgError("domain", "cannot be an empty string")
}
if id < 1 {
return nil, NewArgError("id", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
// EditRecord edits a record using a DomainRecordEditRequest
func (s *DomainsServiceOp) EditRecord(ctx context.Context,
domain string,
id int,
editRequest *DomainRecordEditRequest,
) (*DomainRecord, *Response, error) {
if len(domain) < 1 {
return nil, nil, NewArgError("domain", "cannot be an empty string")
}
if id < 1 {
return nil, nil, NewArgError("id", "cannot be less than 1")
}
if editRequest == nil {
return nil, nil, NewArgError("editRequest", "cannot be nil")
}
path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id)
req, err := s.client.NewRequest(ctx, http.MethodPut, path, editRequest)
if err != nil {
return nil, nil, err
}
root := new(domainRecordRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.DomainRecord, resp, err
}
// CreateRecord creates a record using a DomainRecordEditRequest
func (s *DomainsServiceOp) CreateRecord(ctx context.Context,
domain string,
createRequest *DomainRecordEditRequest) (*DomainRecord, *Response, error) {
if len(domain) < 1 {
return nil, nil, NewArgError("domain", "cannot be an empty string")
}
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
d := new(domainRecordRoot)
resp, err := s.client.Do(ctx, req, d)
if err != nil {
return nil, resp, err
}
return d.DomainRecord, resp, err
}
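
A hedged usage sketch of the service above: create a domain, then add an A record. All values (token, names, address) are placeholders, and godo.NewFromToken is assumed from the package root, which this diff does not show.

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token
	ctx := context.TODO()

	if _, _, err := client.Domains.Create(ctx, &godo.DomainCreateRequest{Name: "example.com"}); err != nil {
		log.Fatal(err)
	}

	// Point www at a placeholder address with a 30-minute TTL.
	rec, _, err := client.Domains.CreateRecord(ctx, "example.com", &godo.DomainRecordEditRequest{
		Type: "A",
		Name: "www",
		Data: "203.0.113.10",
		TTL:  1800,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created record id:", rec.ID)
}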

329
vendor/github.com/digitalocean/godo/droplet_actions.go generated vendored Normal file
View file

@@ -0,0 +1,329 @@
package godo
import (
"context"
"fmt"
"net/http"
"net/url"
)
// ActionRequest represents a DigitalOcean Action Request
type ActionRequest map[string]interface{}
// DropletActionsService is an interface for interfacing with the Droplet actions
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#droplet-actions
type DropletActionsService interface {
Shutdown(context.Context, int) (*Action, *Response, error)
ShutdownByTag(context.Context, string) ([]Action, *Response, error)
PowerOff(context.Context, int) (*Action, *Response, error)
PowerOffByTag(context.Context, string) ([]Action, *Response, error)
PowerOn(context.Context, int) (*Action, *Response, error)
PowerOnByTag(context.Context, string) ([]Action, *Response, error)
PowerCycle(context.Context, int) (*Action, *Response, error)
PowerCycleByTag(context.Context, string) ([]Action, *Response, error)
Reboot(context.Context, int) (*Action, *Response, error)
Restore(context.Context, int, int) (*Action, *Response, error)
Resize(context.Context, int, string, bool) (*Action, *Response, error)
Rename(context.Context, int, string) (*Action, *Response, error)
Snapshot(context.Context, int, string) (*Action, *Response, error)
SnapshotByTag(context.Context, string, string) ([]Action, *Response, error)
EnableBackups(context.Context, int) (*Action, *Response, error)
EnableBackupsByTag(context.Context, string) ([]Action, *Response, error)
DisableBackups(context.Context, int) (*Action, *Response, error)
DisableBackupsByTag(context.Context, string) ([]Action, *Response, error)
PasswordReset(context.Context, int) (*Action, *Response, error)
RebuildByImageID(context.Context, int, int) (*Action, *Response, error)
RebuildByImageSlug(context.Context, int, string) (*Action, *Response, error)
ChangeKernel(context.Context, int, int) (*Action, *Response, error)
EnableIPv6(context.Context, int) (*Action, *Response, error)
EnableIPv6ByTag(context.Context, string) ([]Action, *Response, error)
EnablePrivateNetworking(context.Context, int) (*Action, *Response, error)
EnablePrivateNetworkingByTag(context.Context, string) ([]Action, *Response, error)
Get(context.Context, int, int) (*Action, *Response, error)
GetByURI(context.Context, string) (*Action, *Response, error)
}
// DropletActionsServiceOp handles communication with the Droplet action related
// methods of the DigitalOcean API.
type DropletActionsServiceOp struct {
client *Client
}
var _ DropletActionsService = &DropletActionsServiceOp{}
// Shutdown a Droplet
func (s *DropletActionsServiceOp) Shutdown(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "shutdown"}
return s.doAction(ctx, id, request)
}
// ShutdownByTag shuts down Droplets matched by a Tag.
func (s *DropletActionsServiceOp) ShutdownByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "shutdown"}
return s.doActionByTag(ctx, tag, request)
}
// PowerOff a Droplet
func (s *DropletActionsServiceOp) PowerOff(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "power_off"}
return s.doAction(ctx, id, request)
}
// PowerOffByTag powers off Droplets matched by a Tag.
func (s *DropletActionsServiceOp) PowerOffByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "power_off"}
return s.doActionByTag(ctx, tag, request)
}
// PowerOn a Droplet
func (s *DropletActionsServiceOp) PowerOn(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "power_on"}
return s.doAction(ctx, id, request)
}
// PowerOnByTag powers on Droplets matched by a Tag.
func (s *DropletActionsServiceOp) PowerOnByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "power_on"}
return s.doActionByTag(ctx, tag, request)
}
// PowerCycle a Droplet
func (s *DropletActionsServiceOp) PowerCycle(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "power_cycle"}
return s.doAction(ctx, id, request)
}
// PowerCycleByTag power cycles Droplets matched by a Tag.
func (s *DropletActionsServiceOp) PowerCycleByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "power_cycle"}
return s.doActionByTag(ctx, tag, request)
}
// Reboot a Droplet
func (s *DropletActionsServiceOp) Reboot(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "reboot"}
return s.doAction(ctx, id, request)
}
// Restore a Droplet from a backup image
func (s *DropletActionsServiceOp) Restore(ctx context.Context, id, imageID int) (*Action, *Response, error) {
requestType := "restore"
request := &ActionRequest{
"type": requestType,
"image": float64(imageID),
}
return s.doAction(ctx, id, request)
}
// Resize a Droplet
func (s *DropletActionsServiceOp) Resize(ctx context.Context, id int, sizeSlug string, resizeDisk bool) (*Action, *Response, error) {
requestType := "resize"
request := &ActionRequest{
"type": requestType,
"size": sizeSlug,
"disk": resizeDisk,
}
return s.doAction(ctx, id, request)
}
// Rename a Droplet
func (s *DropletActionsServiceOp) Rename(ctx context.Context, id int, name string) (*Action, *Response, error) {
requestType := "rename"
request := &ActionRequest{
"type": requestType,
"name": name,
}
return s.doAction(ctx, id, request)
}
// Snapshot a Droplet.
func (s *DropletActionsServiceOp) Snapshot(ctx context.Context, id int, name string) (*Action, *Response, error) {
requestType := "snapshot"
request := &ActionRequest{
"type": requestType,
"name": name,
}
return s.doAction(ctx, id, request)
}
// SnapshotByTag snapshots Droplets matched by a Tag.
func (s *DropletActionsServiceOp) SnapshotByTag(ctx context.Context, tag string, name string) ([]Action, *Response, error) {
requestType := "snapshot"
request := &ActionRequest{
"type": requestType,
"name": name,
}
return s.doActionByTag(ctx, tag, request)
}
// EnableBackups enables backups for a Droplet.
func (s *DropletActionsServiceOp) EnableBackups(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "enable_backups"}
return s.doAction(ctx, id, request)
}
// EnableBackupsByTag enables backups for Droplets matched by a Tag.
func (s *DropletActionsServiceOp) EnableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "enable_backups"}
return s.doActionByTag(ctx, tag, request)
}
// DisableBackups disables backups for a Droplet.
func (s *DropletActionsServiceOp) DisableBackups(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "disable_backups"}
return s.doAction(ctx, id, request)
}
// DisableBackupsByTag disables backups for Droplets matched by a Tag.
func (s *DropletActionsServiceOp) DisableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "disable_backups"}
return s.doActionByTag(ctx, tag, request)
}
// PasswordReset resets the password for a Droplet.
func (s *DropletActionsServiceOp) PasswordReset(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "password_reset"}
return s.doAction(ctx, id, request)
}
// RebuildByImageID rebuilds a Droplet from an image with a given id.
func (s *DropletActionsServiceOp) RebuildByImageID(ctx context.Context, id, imageID int) (*Action, *Response, error) {
request := &ActionRequest{"type": "rebuild", "image": imageID}
return s.doAction(ctx, id, request)
}
// RebuildByImageSlug rebuilds a Droplet from an Image matched by a given Slug.
func (s *DropletActionsServiceOp) RebuildByImageSlug(ctx context.Context, id int, slug string) (*Action, *Response, error) {
request := &ActionRequest{"type": "rebuild", "image": slug}
return s.doAction(ctx, id, request)
}
// ChangeKernel changes the kernel for a Droplet.
func (s *DropletActionsServiceOp) ChangeKernel(ctx context.Context, id, kernelID int) (*Action, *Response, error) {
request := &ActionRequest{"type": "change_kernel", "kernel": kernelID}
return s.doAction(ctx, id, request)
}
// EnableIPv6 enables IPv6 for a Droplet.
func (s *DropletActionsServiceOp) EnableIPv6(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "enable_ipv6"}
return s.doAction(ctx, id, request)
}
// EnableIPv6ByTag enables IPv6 for Droplets matched by a Tag.
func (s *DropletActionsServiceOp) EnableIPv6ByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "enable_ipv6"}
return s.doActionByTag(ctx, tag, request)
}
// EnablePrivateNetworking enables private networking for a Droplet.
func (s *DropletActionsServiceOp) EnablePrivateNetworking(ctx context.Context, id int) (*Action, *Response, error) {
request := &ActionRequest{"type": "enable_private_networking"}
return s.doAction(ctx, id, request)
}
// EnablePrivateNetworkingByTag enables private networking for Droplets matched by a Tag.
func (s *DropletActionsServiceOp) EnablePrivateNetworkingByTag(ctx context.Context, tag string) ([]Action, *Response, error) {
request := &ActionRequest{"type": "enable_private_networking"}
return s.doActionByTag(ctx, tag, request)
}
func (s *DropletActionsServiceOp) doAction(ctx context.Context, id int, request *ActionRequest) (*Action, *Response, error) {
if id < 1 {
return nil, nil, NewArgError("id", "cannot be less than 1")
}
if request == nil {
return nil, nil, NewArgError("request", "request can't be nil")
}
path := dropletActionPath(id)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, request)
if err != nil {
return nil, nil, err
}
root := new(actionRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Event, resp, err
}
func (s *DropletActionsServiceOp) doActionByTag(ctx context.Context, tag string, request *ActionRequest) ([]Action, *Response, error) {
if tag == "" {
return nil, nil, NewArgError("tag", "cannot be empty")
}
if request == nil {
return nil, nil, NewArgError("request", "request can't be nil")
}
path := dropletActionPathByTag(tag)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, request)
if err != nil {
return nil, nil, err
}
root := new(actionsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Actions, resp, err
}
// Get an action for a particular Droplet by id.
func (s *DropletActionsServiceOp) Get(ctx context.Context, dropletID, actionID int) (*Action, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
if actionID < 1 {
return nil, nil, NewArgError("actionID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d", dropletActionPath(dropletID), actionID)
return s.get(ctx, path)
}
// GetByURI gets an action for a particular Droplet by URI.
func (s *DropletActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) {
u, err := url.Parse(rawurl)
if err != nil {
return nil, nil, err
}
return s.get(ctx, u.Path)
}
func (s *DropletActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Event, resp, err
}
func dropletActionPath(dropletID int) string {
return fmt.Sprintf("v2/droplets/%d/actions", dropletID)
}
func dropletActionPathByTag(tag string) string {
return fmt.Sprintf("v2/droplets/actions?tag_name=%s", tag)
}
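
Every exported method above funnels into doAction or doActionByTag, which POST an ActionRequest and decode the resulting Action(s). A typical caller fires an action and then polls its Status, as in this hypothetical sketch; the droplet ID and token are placeholders, and godo.ActionInProgress is assumed to be the package's in-progress status constant.

package main

import (
	"context"
	"log"
	"time"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token
	ctx := context.TODO()
	dropletID := 12345 // hypothetical droplet

	action, _, err := client.DropletActions.Reboot(ctx, dropletID)
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the action leaves the in-progress state.
	for action.Status == godo.ActionInProgress {
		time.Sleep(5 * time.Second)
		action, _, err = client.DropletActions.Get(ctx, dropletID, action.ID)
		if err != nil {
			log.Fatal(err)
		}
	}
	log.Println("reboot finished with status:", action.Status)
}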

592
vendor/github.com/digitalocean/godo/droplets.go generated vendored Normal file
View file

@@ -0,0 +1,592 @@
package godo
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
)
const dropletBasePath = "v2/droplets"
var errNoNetworks = errors.New("no networks have been defined")
// DropletsService is an interface for interfacing with the Droplet
// endpoints of the DigitalOcean API
// See: https://developers.digitalocean.com/documentation/v2#droplets
type DropletsService interface {
List(context.Context, *ListOptions) ([]Droplet, *Response, error)
ListByTag(context.Context, string, *ListOptions) ([]Droplet, *Response, error)
Get(context.Context, int) (*Droplet, *Response, error)
Create(context.Context, *DropletCreateRequest) (*Droplet, *Response, error)
CreateMultiple(context.Context, *DropletMultiCreateRequest) ([]Droplet, *Response, error)
Delete(context.Context, int) (*Response, error)
DeleteByTag(context.Context, string) (*Response, error)
Kernels(context.Context, int, *ListOptions) ([]Kernel, *Response, error)
Snapshots(context.Context, int, *ListOptions) ([]Image, *Response, error)
Backups(context.Context, int, *ListOptions) ([]Image, *Response, error)
Actions(context.Context, int, *ListOptions) ([]Action, *Response, error)
Neighbors(context.Context, int) ([]Droplet, *Response, error)
}
// DropletsServiceOp handles communication with the Droplet related methods of the
// DigitalOcean API.
type DropletsServiceOp struct {
client *Client
}
var _ DropletsService = &DropletsServiceOp{}
// Droplet represents a DigitalOcean Droplet
type Droplet struct {
ID int `json:"id,float64,omitempty"`
Name string `json:"name,omitempty"`
Memory int `json:"memory,omitempty"`
Vcpus int `json:"vcpus,omitempty"`
Disk int `json:"disk,omitempty"`
Region *Region `json:"region,omitempty"`
Image *Image `json:"image,omitempty"`
Size *Size `json:"size,omitempty"`
SizeSlug string `json:"size_slug,omitempty"`
BackupIDs []int `json:"backup_ids,omitempty"`
NextBackupWindow *BackupWindow `json:"next_backup_window,omitempty"`
SnapshotIDs []int `json:"snapshot_ids,omitempty"`
Features []string `json:"features,omitempty"`
Locked bool `json:"locked,bool,omitempty"`
Status string `json:"status,omitempty"`
Networks *Networks `json:"networks,omitempty"`
Created string `json:"created_at,omitempty"`
Kernel *Kernel `json:"kernel,omitempty"`
Tags []string `json:"tags,omitempty"`
VolumeIDs []string `json:"volume_ids"`
VPCUUID string `json:"vpc_uuid,omitempty"`
}
// PublicIPv4 returns the public IPv4 address for the Droplet.
func (d *Droplet) PublicIPv4() (string, error) {
if d.Networks == nil {
return "", errNoNetworks
}
for _, v4 := range d.Networks.V4 {
if v4.Type == "public" {
return v4.IPAddress, nil
}
}
return "", nil
}
// PrivateIPv4 returns the private IPv4 address for the Droplet.
func (d *Droplet) PrivateIPv4() (string, error) {
if d.Networks == nil {
return "", errNoNetworks
}
for _, v4 := range d.Networks.V4 {
if v4.Type == "private" {
return v4.IPAddress, nil
}
}
return "", nil
}
// PublicIPv6 returns the public IPv6 address for the Droplet.
func (d *Droplet) PublicIPv6() (string, error) {
if d.Networks == nil {
return "", errNoNetworks
}
for _, v6 := range d.Networks.V6 {
if v6.Type == "public" {
return v6.IPAddress, nil
}
}
return "", nil
}
// Kernel object
type Kernel struct {
ID int `json:"id,float64,omitempty"`
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
}
// BackupWindow object
type BackupWindow struct {
Start *Timestamp `json:"start,omitempty"`
End *Timestamp `json:"end,omitempty"`
}
// Convert Droplet to a string
func (d Droplet) String() string {
return Stringify(d)
}
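// URN returns the droplet ID in DigitalOcean URN form (via ToURN, e.g. do:droplet:123).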
func (d Droplet) URN() string {
return ToURN("Droplet", d.ID)
}
// dropletRoot represents a Droplet root
type dropletRoot struct {
Droplet *Droplet `json:"droplet"`
Links *Links `json:"links,omitempty"`
}
type dropletsRoot struct {
Droplets []Droplet `json:"droplets"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type kernelsRoot struct {
Kernels []Kernel `json:"kernels,omitempty"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type dropletSnapshotsRoot struct {
Snapshots []Image `json:"snapshots,omitempty"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type backupsRoot struct {
Backups []Image `json:"backups,omitempty"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
// DropletCreateImage identifies an image for the create request. It prefers slug over ID.
type DropletCreateImage struct {
ID int
Slug string
}
// MarshalJSON returns either the slug or id of the image. It returns the id
// if the slug is empty.
func (d DropletCreateImage) MarshalJSON() ([]byte, error) {
if d.Slug != "" {
return json.Marshal(d.Slug)
}
return json.Marshal(d.ID)
}
// DropletCreateVolume identifies a volume to attach for the create request. It
// prefers Name over ID.
type DropletCreateVolume struct {
ID string
Name string
}
// MarshalJSON returns an object with either the name or id of the volume. It
// returns the id if the name is empty.
func (d DropletCreateVolume) MarshalJSON() ([]byte, error) {
if d.Name != "" {
return json.Marshal(struct {
Name string `json:"name"`
}{Name: d.Name})
}
return json.Marshal(struct {
ID string `json:"id"`
}{ID: d.ID})
}
// DropletCreateSSHKey identifies an SSH key for the create request. It prefers fingerprint over ID.
type DropletCreateSSHKey struct {
ID int
Fingerprint string
}
// MarshalJSON returns either the fingerprint or id of the ssh key. It returns
// the id if the fingerprint is empty.
func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) {
if d.Fingerprint != "" {
return json.Marshal(d.Fingerprint)
}
return json.Marshal(d.ID)
}
// DropletCreateRequest represents a request to create a Droplet.
type DropletCreateRequest struct {
Name string `json:"name"`
Region string `json:"region"`
Size string `json:"size"`
Image DropletCreateImage `json:"image"`
SSHKeys []DropletCreateSSHKey `json:"ssh_keys"`
Backups bool `json:"backups"`
IPv6 bool `json:"ipv6"`
PrivateNetworking bool `json:"private_networking"`
Monitoring bool `json:"monitoring"`
UserData string `json:"user_data,omitempty"`
Volumes []DropletCreateVolume `json:"volumes,omitempty"`
Tags []string `json:"tags"`
VPCUUID string `json:"vpc_uuid,omitempty"`
}
// DropletMultiCreateRequest is a request to create multiple Droplets.
type DropletMultiCreateRequest struct {
Names []string `json:"names"`
Region string `json:"region"`
Size string `json:"size"`
Image DropletCreateImage `json:"image"`
SSHKeys []DropletCreateSSHKey `json:"ssh_keys"`
Backups bool `json:"backups"`
IPv6 bool `json:"ipv6"`
PrivateNetworking bool `json:"private_networking"`
Monitoring bool `json:"monitoring"`
UserData string `json:"user_data,omitempty"`
Tags []string `json:"tags"`
VPCUUID string `json:"vpc_uuid,omitempty"`
}
func (d DropletCreateRequest) String() string {
return Stringify(d)
}
func (d DropletMultiCreateRequest) String() string {
return Stringify(d)
}
// Networks represents the Droplet's Networks.
type Networks struct {
V4 []NetworkV4 `json:"v4,omitempty"`
V6 []NetworkV6 `json:"v6,omitempty"`
}
// NetworkV4 represents a DigitalOcean IPv4 Network.
type NetworkV4 struct {
IPAddress string `json:"ip_address,omitempty"`
Netmask string `json:"netmask,omitempty"`
Gateway string `json:"gateway,omitempty"`
Type string `json:"type,omitempty"`
}
func (n NetworkV4) String() string {
return Stringify(n)
}
// NetworkV6 represents a DigitalOcean IPv6 network.
type NetworkV6 struct {
IPAddress string `json:"ip_address,omitempty"`
Netmask int `json:"netmask,omitempty"`
Gateway string `json:"gateway,omitempty"`
Type string `json:"type,omitempty"`
}
func (n NetworkV6) String() string {
return Stringify(n)
}
// Performs a list request given a path.
func (s *DropletsServiceOp) list(ctx context.Context, path string) ([]Droplet, *Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(dropletsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Droplets, resp, err
}
// List all Droplets.
func (s *DropletsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Droplet, *Response, error) {
path := dropletBasePath
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
return s.list(ctx, path)
}
// ListByTag lists all Droplets matched by a Tag.
func (s *DropletsServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Droplet, *Response, error) {
path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
return s.list(ctx, path)
}
// Get individual Droplet.
func (s *DropletsServiceOp) Get(ctx context.Context, dropletID int) (*Droplet, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(dropletRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Droplet, resp, err
}
// Create Droplet
func (s *DropletsServiceOp) Create(ctx context.Context, createRequest *DropletCreateRequest) (*Droplet, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := dropletBasePath
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(dropletRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Droplet, resp, err
}
// CreateMultiple creates multiple Droplets.
func (s *DropletsServiceOp) CreateMultiple(ctx context.Context, createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) {
if createRequest == nil {
return nil, nil, NewArgError("createRequest", "cannot be nil")
}
path := dropletBasePath
req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(dropletsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Droplets, resp, err
}
// Performs a delete request given a path
func (s *DropletsServiceOp) delete(ctx context.Context, path string) (*Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(ctx, req, nil)
return resp, err
}
// Delete Droplet.
func (s *DropletsServiceOp) Delete(ctx context.Context, dropletID int) (*Response, error) {
if dropletID < 1 {
return nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID)
return s.delete(ctx, path)
}
// DeleteByTag deletes Droplets matched by a Tag.
func (s *DropletsServiceOp) DeleteByTag(ctx context.Context, tag string) (*Response, error) {
if tag == "" {
return nil, NewArgError("tag", "cannot be empty")
}
path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag)
return s.delete(ctx, path)
}
// Kernels lists kernels available for a Droplet.
func (s *DropletsServiceOp) Kernels(ctx context.Context, dropletID int, opt *ListOptions) ([]Kernel, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d/kernels", dropletBasePath, dropletID)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(kernelsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Kernels, resp, err
}
// Actions lists the actions for a Droplet.
func (s *DropletsServiceOp) Actions(ctx context.Context, dropletID int, opt *ListOptions) ([]Action, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d/actions", dropletBasePath, dropletID)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Actions, resp, err
}
// Backups lists the backups for a Droplet.
func (s *DropletsServiceOp) Backups(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d/backups", dropletBasePath, dropletID)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(backupsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Backups, resp, err
}
// Snapshots lists the snapshots available for a Droplet.
func (s *DropletsServiceOp) Snapshots(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d/snapshots", dropletBasePath, dropletID)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(dropletSnapshotsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Snapshots, resp, err
}
// Neighbors lists the neighbors for a Droplet.
func (s *DropletsServiceOp) Neighbors(ctx context.Context, dropletID int) ([]Droplet, *Response, error) {
if dropletID < 1 {
return nil, nil, NewArgError("dropletID", "cannot be less than 1")
}
path := fmt.Sprintf("%s/%d/neighbors", dropletBasePath, dropletID)
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(dropletsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Droplets, resp, err
}
func (s *DropletsServiceOp) dropletActionStatus(ctx context.Context, uri string) (string, error) {
action, _, err := s.client.DropletActions.GetByURI(ctx, uri)
if err != nil {
return "", err
}
return action.Status, nil
}
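
Two mechanics above deserve a sketch: DropletCreateImage marshals to its slug when Slug is set, and each list response copies Links/Meta onto the Response for pagination. This is a hypothetical example; the token and names are placeholders, and godo.NewFromToken plus the Links.IsLastPage/CurrentPage helpers are assumed from parts of the package this diff omits.

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token
	ctx := context.TODO()

	// Image by slug: MarshalJSON above emits "ubuntu-20-04-x64", not an ID.
	droplet, _, err := client.Droplets.Create(ctx, &godo.DropletCreateRequest{
		Name:   "web-1",
		Region: "nyc3",
		Size:   "s-1vcpu-1gb",
		Image:  godo.DropletCreateImage{Slug: "ubuntu-20-04-x64"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created droplet:", droplet.ID)

	// Walk every page using the Links copied onto each Response.
	opt := &godo.ListOptions{PerPage: 50}
	for {
		page, resp, err := client.Droplets.List(ctx, opt)
		if err != nil {
			log.Fatal(err)
		}
		log.Println("fetched", len(page), "droplets")
		if resp.Links == nil || resp.Links.IsLastPage() {
			break
		}
		cur, err := resp.Links.CurrentPage()
		if err != nil {
			log.Fatal(err)
		}
		opt.Page = cur + 1
	}
}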

24
vendor/github.com/digitalocean/godo/errors.go generated vendored Normal file
View file

@@ -0,0 +1,24 @@
package godo
import "fmt"
// ArgError is an error that represents an error with an input to godo. It
// identifies the argument and the cause (if possible).
type ArgError struct {
arg string
reason string
}
var _ error = &ArgError{}
// NewArgError creates an ArgError.
func NewArgError(arg, reason string) *ArgError {
return &ArgError{
arg: arg,
reason: reason,
}
}
func (e *ArgError) Error() string {
return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason)
}
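
Since NewArgError returns an *ArgError satisfying error, callers can detect argument mistakes before any HTTP round trip. A small sketch using the standard library's errors.As (the token is a placeholder; godo.NewFromToken is assumed from the package root):

package main

import (
	"context"
	"errors"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token

	// An empty name trips the NewArgError path in Domains.Get
	// before any request is built.
	_, _, err := client.Domains.Get(context.TODO(), "")

	var argErr *godo.ArgError
	if errors.As(err, &argErr) {
		log.Println("bad argument:", argErr)
	}
}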

271
vendor/github.com/digitalocean/godo/firewalls.go generated vendored Normal file
View file

@@ -0,0 +1,271 @@
package godo
import (
"context"
"net/http"
"path"
"strconv"
)
const firewallsBasePath = "/v2/firewalls"
// FirewallsService is an interface for managing Firewalls with the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2/#firewalls
type FirewallsService interface {
Get(context.Context, string) (*Firewall, *Response, error)
Create(context.Context, *FirewallRequest) (*Firewall, *Response, error)
Update(context.Context, string, *FirewallRequest) (*Firewall, *Response, error)
Delete(context.Context, string) (*Response, error)
List(context.Context, *ListOptions) ([]Firewall, *Response, error)
ListByDroplet(context.Context, int, *ListOptions) ([]Firewall, *Response, error)
AddDroplets(context.Context, string, ...int) (*Response, error)
RemoveDroplets(context.Context, string, ...int) (*Response, error)
AddTags(context.Context, string, ...string) (*Response, error)
RemoveTags(context.Context, string, ...string) (*Response, error)
AddRules(context.Context, string, *FirewallRulesRequest) (*Response, error)
RemoveRules(context.Context, string, *FirewallRulesRequest) (*Response, error)
}
// FirewallsServiceOp handles communication with Firewalls methods of the DigitalOcean API.
type FirewallsServiceOp struct {
client *Client
}
// Firewall represents a DigitalOcean Firewall configuration.
type Firewall struct {
ID string `json:"id"`
Name string `json:"name"`
Status string `json:"status"`
InboundRules []InboundRule `json:"inbound_rules"`
OutboundRules []OutboundRule `json:"outbound_rules"`
DropletIDs []int `json:"droplet_ids"`
Tags []string `json:"tags"`
Created string `json:"created_at"`
PendingChanges []PendingChange `json:"pending_changes"`
}
// String creates a human-readable description of a Firewall.
func (fw Firewall) String() string {
return Stringify(fw)
}
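// URN returns the firewall ID in DigitalOcean URN form (via ToURN, e.g. do:firewall:<id>).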
func (fw Firewall) URN() string {
return ToURN("Firewall", fw.ID)
}
// FirewallRequest represents the configuration to be applied to an existing or a new Firewall.
type FirewallRequest struct {
Name string `json:"name"`
InboundRules []InboundRule `json:"inbound_rules"`
OutboundRules []OutboundRule `json:"outbound_rules"`
DropletIDs []int `json:"droplet_ids"`
Tags []string `json:"tags"`
}
// FirewallRulesRequest represents rules configuration to be applied to an existing Firewall.
type FirewallRulesRequest struct {
InboundRules []InboundRule `json:"inbound_rules"`
OutboundRules []OutboundRule `json:"outbound_rules"`
}
// InboundRule represents a DigitalOcean Firewall inbound rule.
type InboundRule struct {
Protocol string `json:"protocol,omitempty"`
PortRange string `json:"ports,omitempty"`
Sources *Sources `json:"sources"`
}
// OutboundRule represents a DigitalOcean Firewall outbound rule.
type OutboundRule struct {
Protocol string `json:"protocol,omitempty"`
PortRange string `json:"ports,omitempty"`
Destinations *Destinations `json:"destinations"`
}
// Sources represents the sources of a DigitalOcean Firewall InboundRule.
type Sources struct {
Addresses []string `json:"addresses,omitempty"`
Tags []string `json:"tags,omitempty"`
DropletIDs []int `json:"droplet_ids,omitempty"`
LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"`
}
// PendingChange represents DigitalOcean Firewall status details.
type PendingChange struct {
DropletID int `json:"droplet_id,omitempty"`
Removing bool `json:"removing,omitempty"`
Status string `json:"status,omitempty"`
}
// Destinations represents the destinations of a DigitalOcean Firewall OutboundRule.
type Destinations struct {
Addresses []string `json:"addresses,omitempty"`
Tags []string `json:"tags,omitempty"`
DropletIDs []int `json:"droplet_ids,omitempty"`
LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"`
}
var _ FirewallsService = &FirewallsServiceOp{}
// Get an existing Firewall by its identifier.
func (fw *FirewallsServiceOp) Get(ctx context.Context, fID string) (*Firewall, *Response, error) {
path := path.Join(firewallsBasePath, fID)
req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(firewallRoot)
resp, err := fw.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Firewall, resp, err
}
// Create a new Firewall with a given configuration.
func (fw *FirewallsServiceOp) Create(ctx context.Context, fr *FirewallRequest) (*Firewall, *Response, error) {
req, err := fw.client.NewRequest(ctx, http.MethodPost, firewallsBasePath, fr)
if err != nil {
return nil, nil, err
}
root := new(firewallRoot)
resp, err := fw.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Firewall, resp, err
}
// Update an existing Firewall with new configuration.
func (fw *FirewallsServiceOp) Update(ctx context.Context, fID string, fr *FirewallRequest) (*Firewall, *Response, error) {
path := path.Join(firewallsBasePath, fID)
req, err := fw.client.NewRequest(ctx, http.MethodPut, path, fr)
if err != nil {
return nil, nil, err
}
root := new(firewallRoot)
resp, err := fw.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Firewall, resp, err
}
// Delete a Firewall by its identifier.
func (fw *FirewallsServiceOp) Delete(ctx context.Context, fID string) (*Response, error) {
path := path.Join(firewallsBasePath, fID)
return fw.createAndDoReq(ctx, http.MethodDelete, path, nil)
}
// List Firewalls.
func (fw *FirewallsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Firewall, *Response, error) {
path, err := addOptions(firewallsBasePath, opt)
if err != nil {
return nil, nil, err
}
return fw.listHelper(ctx, path)
}
// ListByDroplet lists Firewalls applied to a given Droplet.
func (fw *FirewallsServiceOp) ListByDroplet(ctx context.Context, dID int, opt *ListOptions) ([]Firewall, *Response, error) {
basePath := path.Join(dropletBasePath, strconv.Itoa(dID), "firewalls")
path, err := addOptions(basePath, opt)
if err != nil {
return nil, nil, err
}
return fw.listHelper(ctx, path)
}
// AddDroplets to a Firewall.
func (fw *FirewallsServiceOp) AddDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "droplets")
return fw.createAndDoReq(ctx, http.MethodPost, path, &dropletsRequest{IDs: dropletIDs})
}
// RemoveDroplets from a Firewall.
func (fw *FirewallsServiceOp) RemoveDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "droplets")
return fw.createAndDoReq(ctx, http.MethodDelete, path, &dropletsRequest{IDs: dropletIDs})
}
// AddTags to a Firewall.
func (fw *FirewallsServiceOp) AddTags(ctx context.Context, fID string, tags ...string) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "tags")
return fw.createAndDoReq(ctx, http.MethodPost, path, &tagsRequest{Tags: tags})
}
// RemoveTags from a Firewall.
func (fw *FirewallsServiceOp) RemoveTags(ctx context.Context, fID string, tags ...string) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "tags")
return fw.createAndDoReq(ctx, http.MethodDelete, path, &tagsRequest{Tags: tags})
}
// AddRules to a Firewall.
func (fw *FirewallsServiceOp) AddRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "rules")
return fw.createAndDoReq(ctx, http.MethodPost, path, rr)
}
// RemoveRules from a Firewall.
func (fw *FirewallsServiceOp) RemoveRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) {
path := path.Join(firewallsBasePath, fID, "rules")
return fw.createAndDoReq(ctx, http.MethodDelete, path, rr)
}
type dropletsRequest struct {
IDs []int `json:"droplet_ids"`
}
type tagsRequest struct {
Tags []string `json:"tags"`
}
type firewallRoot struct {
Firewall *Firewall `json:"firewall"`
}
type firewallsRoot struct {
Firewalls []Firewall `json:"firewalls"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
func (fw *FirewallsServiceOp) createAndDoReq(ctx context.Context, method, path string, v interface{}) (*Response, error) {
req, err := fw.client.NewRequest(ctx, method, path, v)
if err != nil {
return nil, err
}
return fw.client.Do(ctx, req, nil)
}
func (fw *FirewallsServiceOp) listHelper(ctx context.Context, path string) ([]Firewall, *Response, error) {
req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(firewallsRoot)
resp, err := fw.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.Firewalls, resp, err
}
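
A hypothetical sketch of Create using the request and rule structs defined above: allow SSH from anywhere to one droplet. The token and droplet ID are placeholders.

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token
	ctx := context.TODO()

	fw, _, err := client.Firewalls.Create(ctx, &godo.FirewallRequest{
		Name: "ssh-only",
		InboundRules: []godo.InboundRule{{
			Protocol:  "tcp",
			PortRange: "22",
			Sources:   &godo.Sources{Addresses: []string{"0.0.0.0/0", "::/0"}},
		}},
		DropletIDs: []int{12345}, // hypothetical droplet
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("firewall status:", fw.Status)
}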

143
vendor/github.com/digitalocean/godo/floating_ips.go generated vendored Normal file
View file

@@ -0,0 +1,143 @@
package godo
import (
"context"
"fmt"
"net/http"
)
const floatingBasePath = "v2/floating_ips"
// FloatingIPsService is an interface for interfacing with the floating IPs
// endpoints of the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2#floating-ips
type FloatingIPsService interface {
List(context.Context, *ListOptions) ([]FloatingIP, *Response, error)
Get(context.Context, string) (*FloatingIP, *Response, error)
Create(context.Context, *FloatingIPCreateRequest) (*FloatingIP, *Response, error)
Delete(context.Context, string) (*Response, error)
}
// FloatingIPsServiceOp handles communication with the floating IPs related methods of the
// DigitalOcean API.
type FloatingIPsServiceOp struct {
client *Client
}
var _ FloatingIPsService = &FloatingIPsServiceOp{}
// FloatingIP represents a DigitalOcean floating IP.
type FloatingIP struct {
Region *Region `json:"region"`
Droplet *Droplet `json:"droplet"`
IP string `json:"ip"`
}
func (f FloatingIP) String() string {
return Stringify(f)
}
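// URN returns the floating IP in DigitalOcean URN form (via ToURN, e.g. do:floatingip:203.0.113.20).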
func (f FloatingIP) URN() string {
return ToURN("FloatingIP", f.IP)
}
type floatingIPsRoot struct {
FloatingIPs []FloatingIP `json:"floating_ips"`
Links *Links `json:"links"`
Meta *Meta `json:"meta"`
}
type floatingIPRoot struct {
FloatingIP *FloatingIP `json:"floating_ip"`
Links *Links `json:"links,omitempty"`
}
// FloatingIPCreateRequest represents a request to create a floating IP.
// If DropletID is not empty, the floating IP will be assigned to the
// droplet.
type FloatingIPCreateRequest struct {
Region string `json:"region"`
DropletID int `json:"droplet_id,omitempty"`
}
// List all floating IPs.
func (f *FloatingIPsServiceOp) List(ctx context.Context, opt *ListOptions) ([]FloatingIP, *Response, error) {
path := floatingBasePath
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(floatingIPsRoot)
resp, err := f.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
if m := root.Meta; m != nil {
resp.Meta = m
}
return root.FloatingIPs, resp, err
}
// Get an individual floating IP.
func (f *FloatingIPsServiceOp) Get(ctx context.Context, ip string) (*FloatingIP, *Response, error) {
path := fmt.Sprintf("%s/%s", floatingBasePath, ip)
req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(floatingIPRoot)
resp, err := f.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.FloatingIP, resp, err
}
// Create a floating IP. If the DropletID field of the request is not empty,
// the floating IP will also be assigned to the droplet.
func (f *FloatingIPsServiceOp) Create(ctx context.Context, createRequest *FloatingIPCreateRequest) (*FloatingIP, *Response, error) {
path := floatingBasePath
req, err := f.client.NewRequest(ctx, http.MethodPost, path, createRequest)
if err != nil {
return nil, nil, err
}
root := new(floatingIPRoot)
resp, err := f.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.FloatingIP, resp, err
}
// Delete a floating IP.
func (f *FloatingIPsServiceOp) Delete(ctx context.Context, ip string) (*Response, error) {
path := fmt.Sprintf("%s/%s", floatingBasePath, ip)
req, err := f.client.NewRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return nil, err
}
resp, err := f.client.Do(ctx, req, nil)
return resp, err
}
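
A short assumed sketch of reserving a floating IP with the Create method above; the region and token are placeholders. Setting DropletID in the request would instead assign the IP on creation, as the doc comment notes.

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token

	fip, _, err := client.FloatingIPs.Create(context.TODO(), &godo.FloatingIPCreateRequest{Region: "nyc3"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("reserved:", fip.IP)
}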

109
vendor/github.com/digitalocean/godo/floating_ips_actions.go generated vendored Normal file
View file

@@ -0,0 +1,109 @@
package godo
import (
"context"
"fmt"
"net/http"
)
// FloatingIPActionsService is an interface for interfacing with the
// floating IP actions endpoints of the DigitalOcean API.
// See: https://developers.digitalocean.com/documentation/v2#floating-ips-action
type FloatingIPActionsService interface {
Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error)
Unassign(ctx context.Context, ip string) (*Action, *Response, error)
Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error)
List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error)
}
// FloatingIPActionsServiceOp handles communication with the floating IPs
// action related methods of the DigitalOcean API.
type FloatingIPActionsServiceOp struct {
client *Client
}
// Assign a floating IP to a droplet.
func (s *FloatingIPActionsServiceOp) Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) {
request := &ActionRequest{
"type": "assign",
"droplet_id": dropletID,
}
return s.doAction(ctx, ip, request)
}
// Unassign a floating IP from the droplet it is currently assigned to.
func (s *FloatingIPActionsServiceOp) Unassign(ctx context.Context, ip string) (*Action, *Response, error) {
request := &ActionRequest{"type": "unassign"}
return s.doAction(ctx, ip, request)
}
// Get an action for a particular floating IP by id.
func (s *FloatingIPActionsServiceOp) Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) {
path := fmt.Sprintf("%s/%d", floatingIPActionPath(ip), actionID)
return s.get(ctx, path)
}
// List the actions for a particular floating IP.
func (s *FloatingIPActionsServiceOp) List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) {
path := floatingIPActionPath(ip)
path, err := addOptions(path, opt)
if err != nil {
return nil, nil, err
}
return s.list(ctx, path)
}
func (s *FloatingIPActionsServiceOp) doAction(ctx context.Context, ip string, request *ActionRequest) (*Action, *Response, error) {
path := floatingIPActionPath(ip)
req, err := s.client.NewRequest(ctx, http.MethodPost, path, request)
if err != nil {
return nil, nil, err
}
root := new(actionRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Event, resp, err
}
func (s *FloatingIPActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
return root.Event, resp, err
}
func (s *FloatingIPActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) {
req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, nil, err
}
root := new(actionsRoot)
resp, err := s.client.Do(ctx, req, root)
if err != nil {
return nil, resp, err
}
if l := root.Links; l != nil {
resp.Links = l
}
return root.Actions, resp, err
}
func floatingIPActionPath(ip string) string {
return fmt.Sprintf("%s/%s/actions", floatingBasePath, ip)
}
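
And a matching hypothetical sketch for the action endpoints above: assign a reserved IP to a droplet, then detach it. All values are placeholders.

package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example") // hypothetical token
	ctx := context.TODO()

	if _, _, err := client.FloatingIPActions.Assign(ctx, "203.0.113.20", 12345); err != nil {
		log.Fatal(err)
	}
	if _, _, err := client.FloatingIPActions.Unassign(ctx, "203.0.113.20"); err != nil {
		log.Fatal(err)
	}
}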

16
vendor/github.com/digitalocean/godo/go.mod generated vendored Normal file
View file

@@ -0,0 +1,16 @@
module github.com/digitalocean/godo
go 1.14
require (
github.com/golang/protobuf v1.3.5 // indirect
github.com/google/go-querystring v1.0.0
github.com/stretchr/testify v1.4.0
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
google.golang.org/appengine v1.6.5 // indirect
)
replace github.com/stretchr/objx => github.com/stretchr/objx v0.2.0
replace golang.org/x/crypto => golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a

42
vendor/github.com/digitalocean/godo/go.sum generated vendored Normal file
View file

@@ -0,0 +1,42 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

Some files were not shown because too many files have changed in this diff.