From 4f8673aa883438f3283627be12b6b781c0436f60 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Sun, 19 Apr 2015 08:39:33 +0200 Subject: [PATCH 01/27] Simplify update sync for targets, format config fixtures. --- config/fixtures/invalid_label_name.conf.input | 8 +- .../fixtures/invalid_proto_format.conf.input | 8 +- .../invalid_scrape_interval.conf.input | 8 +- config/fixtures/minimal.conf.input | 18 ++-- config/fixtures/sample.conf.input | 54 ++++++------ retrieval/target.go | 82 +++++++------------ retrieval/targetpool_test.go | 11 +-- 7 files changed, 79 insertions(+), 110 deletions(-) diff --git a/config/fixtures/invalid_label_name.conf.input b/config/fixtures/invalid_label_name.conf.input index bfc384bd0..f85538649 100644 --- a/config/fixtures/invalid_label_name.conf.input +++ b/config/fixtures/invalid_label_name.conf.input @@ -1,10 +1,10 @@ global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < + scrape_interval: "30s" + evaluation_interval: "30s" + labels: < label: < name: "monitor-test" value: "test" > - > + > > diff --git a/config/fixtures/invalid_proto_format.conf.input b/config/fixtures/invalid_proto_format.conf.input index 9d15ddec1..ba311005b 100644 --- a/config/fixtures/invalid_proto_format.conf.input +++ b/config/fixtures/invalid_proto_format.conf.input @@ -1,11 +1,11 @@ global < - scrape_interval: "30s" - evaluation_interval: "30s" + scrape_interval: "30s" + evaluation_interval: "30s" unknown_field: "foo" - labels: < + labels: < label: < name: "monitor" value: "test" > - > + > > diff --git a/config/fixtures/invalid_scrape_interval.conf.input b/config/fixtures/invalid_scrape_interval.conf.input index e9274948a..537d50996 100644 --- a/config/fixtures/invalid_scrape_interval.conf.input +++ b/config/fixtures/invalid_scrape_interval.conf.input @@ -1,10 +1,10 @@ global < - scrape_interval: "30" - evaluation_interval: "30s" - labels: < + scrape_interval: "30" + evaluation_interval: "30s" + labels: < label: < name: "monitor" value: "test" > - > + > > diff --git a/config/fixtures/minimal.conf.input b/config/fixtures/minimal.conf.input index 135c316a7..c08f51009 100644 --- a/config/fixtures/minimal.conf.input +++ b/config/fixtures/minimal.conf.input @@ -1,20 +1,20 @@ global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < + scrape_interval: "30s" + evaluation_interval: "30s" + labels: < label: < name: "monitor" value: "test" > - > - rule_file: "prometheus.rules" + > + rule_file: "prometheus.rules" > job: < - name: "prometheus" - scrape_interval: "15s" + name: "prometheus" + scrape_interval: "15s" - target_group: < - target: "http://localhost:9090/metrics.json" + target_group: < + target: "http://localhost:9090/metrics.json" > > diff --git a/config/fixtures/sample.conf.input b/config/fixtures/sample.conf.input index 5c03d674d..1a4ec17bf 100644 --- a/config/fixtures/sample.conf.input +++ b/config/fixtures/sample.conf.input @@ -1,55 +1,55 @@ global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < + scrape_interval: "30s" + evaluation_interval: "30s" + labels: < label: < name: "monitor" value: "test" > - > - rule_file: "prometheus.rules" + > + rule_file: "prometheus.rules" > job: < - name: "prometheus" - scrape_interval: "15s" + name: "prometheus" + scrape_interval: "15s" - target_group: < - target: "http://localhost:9090/metrics.json" - labels: < + target_group: < + target: "http://localhost:9090/metrics.json" + labels: < label: < name: "group" value: "canary" > - > - > + > + > > job: < - name: "random" - scrape_interval: 
"30s" + name: "random" + scrape_interval: "30s" - target_group: < - target: "http://random.com:8080/metrics.json" + target_group: < + target: "http://random.com:8080/metrics.json" target: "http://random.com:8081/metrics.json" - target: "http://random.com:8082/metrics.json" - target: "http://random.com:8083/metrics.json" - target: "http://random.com:8084/metrics.json" - labels: < + target: "http://random.com:8082/metrics.json" + target: "http://random.com:8083/metrics.json" + target: "http://random.com:8084/metrics.json" + labels: < label: < name: "group" value: "production" > - > - > - target_group: < - target: "http://random.com:8085/metrics.json" + > + > + target_group: < + target: "http://random.com:8085/metrics.json" target: "http://random.com:8086/metrics.json" - labels: < + labels: < label: < name: "group" value: "canary" > - > - > + > + > > diff --git a/retrieval/target.go b/retrieval/target.go index 6cdfedf07..dba7f0b8e 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -109,6 +109,8 @@ const ( // // Target implements extraction.Ingester. type Target interface { + extraction.Ingester + // Return the last encountered scrape error, if any. LastError() error // Return the health of the target. @@ -129,18 +131,13 @@ type Target interface { // Return the target's base labels without job and instance label. That's // useful for display purposes. BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet - // SetBaseLabelsFrom queues a replacement of the current base labels by - // the labels of the given target. The method returns immediately after - // queuing. The actual replacement of the base labels happens - // asynchronously (but most likely before the next scrape for the target - // begins). + // SetBaseLabelsFrom sets the target's base labels to the base labels + // of the provided target. SetBaseLabelsFrom(Target) // Scrape target at the specified interval. RunScraper(storage.SampleAppender, time.Duration) // Stop scraping, synchronous. StopScraper() - // Ingest implements extraction.Ingester. - Ingest(clientmodel.Samples) error } // target is a Target that refers to a singular HTTP or HTTPS endpoint. @@ -155,8 +152,6 @@ type target struct { scraperStopping chan struct{} // Closing scraperStopped signals that scraping has been stopped. scraperStopped chan struct{} - // Channel to queue base labels to be replaced. - newBaseLabels chan clientmodel.LabelSet // Channel to buffer ingested samples. ingestedSamples chan clientmodel.Samples @@ -168,12 +163,8 @@ type target struct { // The HTTP client used to scrape the target's endpoint. httpClient *http.Client - // Mutex protects lastError, lastScrape, state, and baseLabels. Writing - // the above must only happen in the goroutine running the RunScraper - // loop, and it must happen under the lock. In that way, no mutex lock - // is required for reading the above in the goroutine running the - // RunScraper loop, but only for reading in other goroutines. - sync.Mutex + // Mutex protects lastError, lastScrape, state, and baseLabels. + sync.RWMutex } // NewTarget creates a reasonably configured target for querying. 
@@ -184,7 +175,6 @@ func NewTarget(url string, deadline time.Duration, baseLabels clientmodel.LabelS httpClient: utility.NewDeadlineClient(deadline), scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), - newBaseLabels: make(chan clientmodel.LabelSet, 1), } t.baseLabels = clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier())} for baseLabel, baseValue := range baseLabels { @@ -213,18 +203,7 @@ func (t *target) Ingest(s clientmodel.Samples) error { // RunScraper implements Target. func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time.Duration) { - defer func() { - // Need to drain t.newBaseLabels to not make senders block during shutdown. - for { - select { - case <-t.newBaseLabels: - // Do nothing. - default: - close(t.scraperStopped) - return - } - } - }() + defer close(t.scraperStopped) jitterTimer := time.NewTimer(time.Duration(float64(interval) * rand.Float64())) select { @@ -245,31 +224,22 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time // Explanation of the contraption below: // - // In case t.newBaseLabels or t.scraperStopping have something to receive, - // we want to read from those channels rather than starting a new scrape - // (which might take very long). That's why the outer select has no - // ticker.C. Should neither t.newBaseLabels nor t.scraperStopping have - // anything to receive, we go into the inner select, where ticker.C is - // in the mix. + // In case t.scraperStopping has something to receive, we want to read + // from that channel rather than starting a new scrape (which might take very + // long). That's why the outer select has no ticker.C. Should t.scraperStopping + // not have anything to receive, we go into the inner select, where ticker.C + // is in the mix. for { select { - case newBaseLabels := <-t.newBaseLabels: - t.Lock() // Writing t.baseLabels requires the lock. - t.baseLabels = newBaseLabels - t.Unlock() case <-t.scraperStopping: return default: select { - case newBaseLabels := <-t.newBaseLabels: - t.Lock() // Writing t.baseLabels requires the lock. - t.baseLabels = newBaseLabels - t.Unlock() case <-t.scraperStopping: return case <-ticker.C: - took := time.Since(t.lastScrape) t.Lock() // Write t.lastScrape requires locking. + took := time.Since(t.lastScrape) t.lastScrape = time.Now() t.Unlock() targetIntervalLength.WithLabelValues(interval.String()).Observe( @@ -290,8 +260,13 @@ func (t *target) StopScraper() { const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1` func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { + t.RLock() timestamp := clientmodel.Now() + defer func(start time.Time) { + t.recordScrapeHealth(sampleAppender, timestamp, err == nil, time.Since(start)) + t.RUnlock() + t.Lock() // Writing t.state and t.lastError requires the lock. if err == nil { t.state = Healthy @@ -300,7 +275,6 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { } t.lastError = err t.Unlock() - t.recordScrapeHealth(sampleAppender, timestamp, err == nil, time.Since(start)) }(time.Now()) req, err := http.NewRequest("GET", t.URL(), nil) @@ -344,22 +318,22 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { // LastError implements Target. 
func (t *target) LastError() error { - t.Lock() - defer t.Unlock() + t.RLock() + defer t.RUnlock() return t.lastError } // State implements Target. func (t *target) State() TargetState { - t.Lock() - defer t.Unlock() + t.RLock() + defer t.RUnlock() return t.state } // LastScrape implements Target. func (t *target) LastScrape() time.Time { - t.Lock() - defer t.Unlock() + t.RLock() + defer t.RUnlock() return t.lastScrape } @@ -406,8 +380,8 @@ func (t *target) GlobalURL() string { // BaseLabels implements Target. func (t *target) BaseLabels() clientmodel.LabelSet { - t.Lock() - defer t.Unlock() + t.RLock() + defer t.RUnlock() return t.baseLabels } @@ -427,7 +401,9 @@ func (t *target) SetBaseLabelsFrom(newTarget Target) { if t.URL() != newTarget.URL() { panic("targets don't refer to the same endpoint") } - t.newBaseLabels <- newTarget.BaseLabels() + t.Lock() + defer t.Unlock() + t.baseLabels = newTarget.BaseLabels() } func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, healthy bool, scrapeDuration time.Duration) { diff --git a/retrieval/targetpool_test.go b/retrieval/targetpool_test.go index 2c9d3738e..56e9ea09b 100644 --- a/retrieval/targetpool_test.go +++ b/retrieval/targetpool_test.go @@ -17,8 +17,6 @@ import ( "net/http" "testing" "time" - - clientmodel "github.com/prometheus/client_golang/model" ) func testTargetPool(t testing.TB) { @@ -84,9 +82,8 @@ func testTargetPool(t testing.TB) { for _, input := range scenario.inputs { target := target{ - url: input.url, - newBaseLabels: make(chan clientmodel.LabelSet, 1), - httpClient: &http.Client{}, + url: input.url, + httpClient: &http.Client{}, } pool.addTarget(&target) } @@ -118,7 +115,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) { state: Unhealthy, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), - newBaseLabels: make(chan clientmodel.LabelSet, 1), httpClient: &http.Client{}, } oldTarget2 := &target{ @@ -126,7 +122,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) { state: Unhealthy, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), - newBaseLabels: make(chan clientmodel.LabelSet, 1), httpClient: &http.Client{}, } newTarget1 := &target{ @@ -134,7 +129,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) { state: Healthy, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), - newBaseLabels: make(chan clientmodel.LabelSet, 1), httpClient: &http.Client{}, } newTarget2 := &target{ @@ -142,7 +136,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) { state: Healthy, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), - newBaseLabels: make(chan clientmodel.LabelSet, 1), httpClient: &http.Client{}, } From 5015c2a0e8f4f63cd949f1aa2bb4eba0b382189f Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Mon, 20 Apr 2015 12:24:25 +0200 Subject: [PATCH 02/27] Make target manager source based. This commit shifts responsibility for maintaining targets from providers and pools to the target manager. Target groups have a source name that identifies them for updates. 
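As a rough sketch of the flow this commit introduces (illustrative only; group and manager are simplified stand-ins for config.TargetGroup and the manager's source-keyed target map, not code from this patch):

    package main

    import "fmt"

    // group stands in for config.TargetGroup: a set of targets plus the
    // source name that identifies where the group came from.
    type group struct {
        source  string
        targets []string
    }

    // manager keeps the current targets keyed by source, so a provider can
    // replace exactly the targets it previously announced.
    type manager struct {
        targets map[string][]string
    }

    // update replaces all targets that originated from g's source. An empty
    // group drops the source entirely.
    func (m *manager) update(g group) {
        if len(g.targets) == 0 {
            delete(m.targets, g.source)
            return
        }
        m.targets[g.source] = g.targets
    }

    func main() {
        ch := make(chan group)
        go func() {
            // Re-sending a source replaces its earlier target set.
            ch <- group{source: "static:0", targets: []string{"example.org:80"}}
            ch <- group{source: "static:0", targets: []string{"example.org:80", "example.com:80"}}
            close(ch)
        }()

        m := &manager{targets: map[string][]string{}}
        for g := range ch {
            m.update(g)
        }
        fmt.Println(m.targets)
    }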
--- .../client_golang/model/labelname.go | 11 + config/config.go | 42 +- config/config.proto | 4 +- config/fixtures/minimal.conf.input | 4 +- .../mixing_sd_and_manual_targets.conf.input | 2 +- config/fixtures/sample.conf.input | 18 +- config/generated/config.pb.go | 14 +- main.go | 12 +- .../{target_provider.go => discovery/dns.go} | 152 +++--- retrieval/helpers_test.go | 24 + retrieval/interface_test.go | 25 - retrieval/target.go | 163 ++++--- retrieval/target_test.go | 114 ++--- retrieval/targetmanager.go | 445 ++++++++++++++---- retrieval/targetmanager_test.go | 326 +++++++++---- retrieval/targetpool.go | 164 ------- retrieval/targetpool_test.go | 164 ------- web/status.go | 2 +- web/templates/status.html | 4 +- 19 files changed, 938 insertions(+), 752 deletions(-) rename retrieval/{target_provider.go => discovery/dns.go} (57%) delete mode 100644 retrieval/interface_test.go delete mode 100644 retrieval/targetpool.go delete mode 100644 retrieval/targetpool_test.go diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go index 75b2e79da..cebc14de3 100644 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go @@ -26,14 +26,25 @@ const ( // timeseries. MetricNameLabel LabelName = "__name__" + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel LabelName = "__address__" + // ReservedLabelPrefix is a prefix which is not legal in user-supplied // label names. ReservedLabelPrefix = "__" + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with the prefix will not be attached to time series. + MetaLabelPrefix = "__meta_" + // JobLabel is the label name indicating the job from which a timeseries // was scraped. JobLabel LabelName = "job" + // InstanceLabel is the label name used for the instance label. + InstanceLabel LabelName = "instance" + // BucketLabel is used for the label that defines the upper bound of a // bucket of a histogram ("le" -> "less or equal"). BucketLabel = "le" diff --git a/config/config.go b/config/config.go index c844f3a0c..1e582e36d 100644 --- a/config/config.go +++ b/config/config.go @@ -16,6 +16,7 @@ package config import ( "fmt" "regexp" + "strings" "time" "github.com/golang/protobuf/proto" @@ -55,6 +56,21 @@ func (c Config) validateLabels(labels *pb.LabelPairs) error { return nil } +// validateHosts validates whether a target group contains valid hosts. +func (c Config) validateHosts(hosts []string) error { + if hosts == nil { + return nil + } + for _, host := range hosts { + // Make sure that this does not contain any paths or schemes. + // This ensures that old configurations error. + if strings.Contains(host, "/") { + return fmt.Errorf("invalid host '%s', no schemes or paths allowed", host) + } + } + return nil +} + // Validate checks an entire parsed Config for the validity of its fields. func (c Config) Validate() error { // Check the global configuration section for validity. 
@@ -93,6 +109,9 @@ func (c Config) Validate() error {
 			if err := c.validateLabels(targetGroup.Labels); err != nil {
 				return fmt.Errorf("invalid labels for job '%s': %s", job.GetName(), err)
 			}
+			if err := c.validateHosts(targetGroup.Target); err != nil {
+				return fmt.Errorf("invalid targets for job '%s': %s", job.GetName(), err)
+			}
 		}
 		if job.SdName != nil && len(job.TargetGroup) > 0 {
 			return fmt.Errorf("specified both DNS-SD name and target group for job: %s", job.GetName())
@@ -115,7 +134,7 @@ func (c Config) GetJobByName(name string) *JobConfig {
 // GlobalLabels returns the global labels as a LabelSet.
 func (c Config) GlobalLabels() clientmodel.LabelSet {
 	labels := clientmodel.LabelSet{}
-	if c.Global.Labels != nil {
+	if c.Global != nil && c.Global.Labels != nil {
 		for _, label := range c.Global.Labels.Label {
 			labels[clientmodel.LabelName(label.GetName())] = clientmodel.LabelValue(label.GetValue())
 		}
@@ -156,6 +175,11 @@ type JobConfig struct {
 	pb.JobConfig
 }
 
+// SDRefreshInterval gets the SD refresh interval for a job.
+func (c JobConfig) SDRefreshInterval() time.Duration {
+	return stringToDuration(c.GetSdRefreshInterval())
+}
+
 // ScrapeInterval gets the scrape interval for a job.
 func (c JobConfig) ScrapeInterval() time.Duration {
 	return stringToDuration(c.GetScrapeInterval())
@@ -165,3 +189,19 @@ func (c JobConfig) ScrapeInterval()
 func (c JobConfig) ScrapeTimeout() time.Duration {
 	return stringToDuration(c.GetScrapeTimeout())
 }
+
+// TargetGroup is derived from a protobuf TargetGroup and attaches a source to it
+// that identifies the origin of the group.
+type TargetGroup struct {
+	// Source is an identifier that describes a group of targets.
+	Source string
+	// Labels is a set of labels that is common across all targets in the group.
+	Labels clientmodel.LabelSet
+	// Targets is a list of targets identified by a label set. Each target is
+	// uniquely identifiable in the group by its address label.
+	Targets []clientmodel.LabelSet
+}
+
+func (tg *TargetGroup) String() string {
+	return tg.Source
+}
diff --git a/config/config.proto b/config/config.proto
index edfce2a7a..d2f741491 100644
--- a/config/config.proto
+++ b/config/config.proto
@@ -71,8 +71,10 @@ message JobConfig {
   // List of labeled target groups for this job. Only legal when DNS-SD isn't
   // used for a job.
   repeated TargetGroup target_group = 5;
-  // The HTTP resource path to fetch metrics from on targets.
+  // The HTTP resource path on which to fetch metrics from targets.
   optional string metrics_path = 6 [default = "/metrics"];
+  // The URL scheme with which to fetch metrics from targets.
+  optional string scheme = 8 [default = "http"];
 }
 
 // The top-level Prometheus configuration.
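The validateHosts check added to config.go above is what makes old URL-style targets fail: targets are now plain host:port strings, and anything containing a slash (a scheme or path) is rejected. A condensed, standalone sketch of the same rule (illustrative only; it mirrors the patch's validateHosts, and the main function exists just to demonstrate the behavior):

    package main

    import (
        "fmt"
        "strings"
    )

    // validateHosts rejects entries that still look like URLs, so that
    // configurations written for the old target format fail loudly.
    func validateHosts(hosts []string) error {
        for _, host := range hosts {
            if strings.Contains(host, "/") {
                return fmt.Errorf("invalid host '%s', no schemes or paths allowed", host)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(validateHosts([]string{"localhost:9090"}))                      // <nil>
        fmt.Println(validateHosts([]string{"http://localhost:9090/metrics.json"})) // error
    }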
diff --git a/config/fixtures/minimal.conf.input b/config/fixtures/minimal.conf.input index c08f51009..637d95e50 100644 --- a/config/fixtures/minimal.conf.input +++ b/config/fixtures/minimal.conf.input @@ -13,8 +13,10 @@ global < job: < name: "prometheus" scrape_interval: "15s" + metrics_path: "/metrics" + scheme: "http" target_group: < - target: "http://localhost:9090/metrics.json" + target: "localhost:9090" > > diff --git a/config/fixtures/mixing_sd_and_manual_targets.conf.input b/config/fixtures/mixing_sd_and_manual_targets.conf.input index 919beb0c5..0d564234e 100644 --- a/config/fixtures/mixing_sd_and_manual_targets.conf.input +++ b/config/fixtures/mixing_sd_and_manual_targets.conf.input @@ -2,6 +2,6 @@ job: < name: "testjob" sd_name: "sd_name" target_group: < - target: "http://sampletarget:8080/metrics.json" + target: "sampletarget:8080" > > diff --git a/config/fixtures/sample.conf.input b/config/fixtures/sample.conf.input index 1a4ec17bf..8ea3a069d 100644 --- a/config/fixtures/sample.conf.input +++ b/config/fixtures/sample.conf.input @@ -15,7 +15,7 @@ job: < scrape_interval: "15s" target_group: < - target: "http://localhost:9090/metrics.json" + target: "localhost:9090" labels: < label: < name: "group" @@ -30,11 +30,12 @@ job: < scrape_interval: "30s" target_group: < - target: "http://random.com:8080/metrics.json" - target: "http://random.com:8081/metrics.json" - target: "http://random.com:8082/metrics.json" - target: "http://random.com:8083/metrics.json" - target: "http://random.com:8084/metrics.json" + target: "random.com:8080" + target: "random.com:8081" + target: "random.com:8082" + target: "random.com:8083" + target: "random.com:8084" + labels: < label: < name: "group" @@ -43,8 +44,9 @@ job: < > > target_group: < - target: "http://random.com:8085/metrics.json" - target: "http://random.com:8086/metrics.json" + target: "random.com:8085" + target: "random.com:8086" + labels: < label: < name: "group" diff --git a/config/generated/config.pb.go b/config/generated/config.pb.go index adc7dd272..089f9053b 100644 --- a/config/generated/config.pb.go +++ b/config/generated/config.pb.go @@ -169,8 +169,10 @@ type JobConfig struct { // List of labeled target groups for this job. Only legal when DNS-SD isn't // used for a job. TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"` - // The HTTP resource path to fetch metrics from on targets. - MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"` + // The HTTP resource path on which to fetch metrics from targets. + MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"` + // The URL scheme with which to fetch metrics from targets. + Scheme *string `protobuf:"bytes,8,opt,name=scheme,def=http" json:"scheme,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -181,6 +183,7 @@ func (*JobConfig) ProtoMessage() {} const Default_JobConfig_ScrapeTimeout string = "10s" const Default_JobConfig_SdRefreshInterval string = "30s" const Default_JobConfig_MetricsPath string = "/metrics" +const Default_JobConfig_Scheme string = "http" func (m *JobConfig) GetName() string { if m != nil && m.Name != nil { @@ -231,6 +234,13 @@ func (m *JobConfig) GetMetricsPath() string { return Default_JobConfig_MetricsPath } +func (m *JobConfig) GetScheme() string { + if m != nil && m.Scheme != nil { + return *m.Scheme + } + return Default_JobConfig_Scheme +} + // The top-level Prometheus configuration. 
type PrometheusConfig struct { // Global Prometheus configuration options. If omitted, an empty global diff --git a/main.go b/main.go index 1deabd339..83cceb2ec 100644 --- a/main.go +++ b/main.go @@ -77,7 +77,7 @@ var ( type prometheus struct { ruleManager manager.RuleManager - targetManager retrieval.TargetManager + targetManager *retrieval.TargetManager notificationHandler *notification.NotificationHandler storage local.Storage remoteStorageQueues []*remote.StorageQueueManager @@ -152,8 +152,11 @@ func NewPrometheus() *prometheus { sampleAppender = fanout } - targetManager := retrieval.NewTargetManager(sampleAppender, conf.GlobalLabels()) - targetManager.AddTargetsFromConfig(conf) + targetManager, err := retrieval.NewTargetManager(conf, sampleAppender) + if err != nil { + glog.Errorf("Error creating target manager: %s", err) + os.Exit(1) + } ruleManager := manager.NewRuleManager(&manager.RuleManagerOptions{ SampleAppender: sampleAppender, @@ -176,7 +179,7 @@ func NewPrometheus() *prometheus { BuildInfo: BuildInfo, Config: conf.String(), RuleManager: ruleManager, - TargetPools: targetManager.Pools(), + TargetPools: targetManager.Pools, Flags: flags, Birth: time.Now(), PathPrefix: *pathPrefix, @@ -231,6 +234,7 @@ func (p *prometheus) Serve() { } go p.ruleManager.Run() go p.notificationHandler.Run() + go p.targetManager.Run() p.storage.Start() diff --git a/retrieval/target_provider.go b/retrieval/discovery/dns.go similarity index 57% rename from retrieval/target_provider.go rename to retrieval/discovery/dns.go index 209b7c5ce..6b71dc0d1 100644 --- a/retrieval/target_provider.go +++ b/retrieval/discovery/dns.go @@ -1,4 +1,4 @@ -// Copyright 2013 The Prometheus Authors +// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package retrieval +package discovery import ( "fmt" "net" - "net/url" "strings" + "sync" "time" "github.com/golang/glog" @@ -25,12 +25,18 @@ import ( "github.com/prometheus/client_golang/prometheus" clientmodel "github.com/prometheus/client_golang/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/utility" ) -const resolvConf = "/etc/resolv.conf" +const ( + resolvConf = "/etc/resolv.conf" + + dnsSourcePrefix = "dns" + + // Constants for instrumentation. + namespace = "prometheus" + interval = "interval" +) var ( dnsSDLookupsCount = prometheus.NewCounter( @@ -52,65 +58,70 @@ func init() { prometheus.MustRegister(dnsSDLookupsCount) } -// TargetProvider encapsulates retrieving all targets for a job. -type TargetProvider interface { - // Retrieves the current list of targets for this provider. - Targets() ([]Target, error) +// DNSDiscovery periodically performs DNS-SD requests. It implements +// the TargetProvider interface. +type DNSDiscovery struct { + name string + + done chan struct{} + ticker *time.Ticker + m sync.RWMutex } -type sdTargetProvider struct { - job config.JobConfig - globalLabels clientmodel.LabelSet - targets []Target - - lastRefresh time.Time - refreshInterval time.Duration -} - -// NewSdTargetProvider constructs a new sdTargetProvider for a job. 
-func NewSdTargetProvider(job config.JobConfig, globalLabels clientmodel.LabelSet) *sdTargetProvider {
-	i, err := utility.StringToDuration(job.GetSdRefreshInterval())
-	if err != nil {
-		panic(fmt.Sprintf("illegal refresh duration string %s: %s", job.GetSdRefreshInterval(), err))
-	}
-	return &sdTargetProvider{
-		job:             job,
-		globalLabels:    globalLabels,
-		refreshInterval: i,
+// NewDNSDiscovery returns a new DNSDiscovery which periodically refreshes its targets.
+func NewDNSDiscovery(name string, refreshInterval time.Duration) *DNSDiscovery {
+	return &DNSDiscovery{
+		name:   name,
+		done:   make(chan struct{}),
+		ticker: time.NewTicker(refreshInterval),
 	}
 }
 
-func (p *sdTargetProvider) Targets() ([]Target, error) {
-	var err error
-	defer func() {
-		dnsSDLookupsCount.Inc()
-		if err != nil {
-			dnsSDLookupFailuresCount.Inc()
+// Run implements the TargetProvider interface.
+func (dd *DNSDiscovery) Run(ch chan<- *config.TargetGroup) {
+	defer close(ch)
+
+	// Get an initial set right away.
+	if err := dd.refresh(ch); err != nil {
+		glog.Errorf("Error refreshing DNS targets: %s", err)
+	}
+
+	for {
+		select {
+		case <-dd.ticker.C:
+			if err := dd.refresh(ch); err != nil {
+				glog.Errorf("Error refreshing DNS targets: %s", err)
+			}
+		case <-dd.done:
+			return
 		}
-	}()
-
-	if time.Since(p.lastRefresh) < p.refreshInterval {
-		return p.targets, nil
 	}
+}
 
-	response, err := lookupSRV(p.job.GetSdName())
+// Stop implements the TargetProvider interface.
+func (dd *DNSDiscovery) Stop() {
+	glog.V(1).Infof("Stopping DNS discovery for %s...", dd.name)
+	dd.ticker.Stop()
+	dd.done <- struct{}{}
+
+	glog.V(1).Infof("DNS discovery for %s stopped.", dd.name)
+}
+
+// Sources implements the TargetProvider interface.
+func (dd *DNSDiscovery) Sources() []string {
+	return []string{dnsSourcePrefix + ":" + dd.name}
+}
+
+func (dd *DNSDiscovery) refresh(ch chan<- *config.TargetGroup) error {
+	response, err := lookupSRV(dd.name)
+	dnsSDLookupsCount.Inc()
 	if err != nil {
-		return nil, err
+		dnsSDLookupFailuresCount.Inc()
+		return err
 	}
 
-	baseLabels := clientmodel.LabelSet{
-		clientmodel.JobLabel: clientmodel.LabelValue(p.job.GetName()),
-	}
-	for n, v := range p.globalLabels {
-		baseLabels[n] = v
-	}
-
-	targets := make([]Target, 0, len(response.Answer))
-	endpoint := &url.URL{
-		Scheme: "http",
-		Path:   p.job.GetMetricsPath(),
-	}
+	tg := &config.TargetGroup{}
 	for _, record := range response.Answer {
 		addr, ok := record.(*dns.SRV)
 		if !ok {
@@ -118,22 +129,24 @@ func (p *sdTargetProvider) Targets() ([]Target, error) {
 			continue
 		}
 		// Remove the final dot from rooted DNS names to make them look more usual.
-		if addr.Target[len(addr.Target)-1] == '.'
{ - addr.Target = addr.Target[:len(addr.Target)-1] - } - endpoint.Host = fmt.Sprintf("%s:%d", addr.Target, addr.Port) - t := NewTarget(endpoint.String(), p.job.ScrapeTimeout(), baseLabels) - targets = append(targets, t) + addr.Target = strings.TrimRight(addr.Target, ".") + + target := clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port)) + tg.Targets = append(tg.Targets, clientmodel.LabelSet{ + clientmodel.AddressLabel: target, + }) } - p.targets = targets - return targets, nil + tg.Source = dnsSourcePrefix + ":" + dd.name + ch <- tg + + return nil } func lookupSRV(name string) (*dns.Msg, error) { conf, err := dns.ClientConfigFromFile(resolvConf) if err != nil { - return nil, fmt.Errorf("couldn't load resolv.conf: %s", err) + return nil, fmt.Errorf("could not load resolv.conf: %s", err) } client := &dns.Client{} @@ -143,12 +156,12 @@ func lookupSRV(name string) (*dns.Msg, error) { servAddr := net.JoinHostPort(server, conf.Port) for _, suffix := range conf.Search { response, err = lookup(name, dns.TypeSRV, client, servAddr, suffix, false) - if err == nil { - if len(response.Answer) > 0 { - return response, nil - } - } else { + if err != nil { glog.Warningf("resolving %s.%s failed: %s", name, suffix, err) + continue + } + if len(response.Answer) > 0 { + return response, nil } } response, err = lookup(name, dns.TypeSRV, client, servAddr, "", false) @@ -156,7 +169,7 @@ func lookupSRV(name string) (*dns.Msg, error) { return response, nil } } - return response, fmt.Errorf("couldn't resolve %s: No server responded", name) + return response, fmt.Errorf("could not resolve %s: No server responded", name) } func lookup(name string, queryType uint16, client *dns.Client, servAddr string, suffix string, edns bool) (*dns.Msg, error) { @@ -179,7 +192,6 @@ func lookup(name string, queryType uint16, client *dns.Client, servAddr string, if err != nil { return nil, err } - if msg.Id != response.Id { return nil, fmt.Errorf("DNS ID mismatch, request: %d, response: %d", msg.Id, response.Id) } @@ -188,11 +200,9 @@ func lookup(name string, queryType uint16, client *dns.Client, servAddr string, if client.Net == "tcp" { return nil, fmt.Errorf("got truncated message on tcp") } - if edns { // Truncated even though EDNS is used client.Net = "tcp" } - return lookup(name, queryType, client, servAddr, suffix, !edns) } diff --git a/retrieval/helpers_test.go b/retrieval/helpers_test.go index ce96aa2e8..f477bbd89 100644 --- a/retrieval/helpers_test.go +++ b/retrieval/helpers_test.go @@ -17,6 +17,8 @@ import ( "time" clientmodel "github.com/prometheus/client_golang/model" + + "github.com/prometheus/prometheus/config" ) type nopAppender struct{} @@ -38,3 +40,25 @@ type collectResultAppender struct { func (a *collectResultAppender) Append(s *clientmodel.Sample) { a.result = append(a.result, s) } + +// fakeTargetProvider implements a TargetProvider and allows manual injection +// of TargetGroups through the update channel. 
+type fakeTargetProvider struct { + sources []string + update chan *config.TargetGroup +} + +func (tp *fakeTargetProvider) Run(ch chan<- *config.TargetGroup) { + defer close(ch) + for tg := range tp.update { + ch <- tg + } +} + +func (tp *fakeTargetProvider) Stop() { + close(tp.update) +} + +func (tp *fakeTargetProvider) Sources() []string { + return tp.sources +} diff --git a/retrieval/interface_test.go b/retrieval/interface_test.go deleted file mode 100644 index 5377e8235..000000000 --- a/retrieval/interface_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package retrieval - -import ( - "testing" -) - -func TestInterfaces(t *testing.T) { - var ( - _ Target = &target{} - _ TargetManager = &targetManager{} - ) -} diff --git a/retrieval/target.go b/retrieval/target.go index dba7f0b8e..a87a5ddb4 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -30,13 +30,12 @@ import ( clientmodel "github.com/prometheus/client_golang/model" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/utility" ) const ( - // InstanceLabel is the label value used for the instance label. - InstanceLabel clientmodel.LabelName = "instance" // ScrapeHealthMetricName is the metric name for the synthetic health // variable. scrapeHealthMetricName clientmodel.LabelValue = "up" @@ -54,7 +53,7 @@ const ( var ( errIngestChannelFull = errors.New("ingestion channel full") - localhostRepresentations = []string{"http://127.0.0.1", "http://localhost"} + localhostRepresentations = []string{"127.0.0.1", "localhost"} targetIntervalLength = prometheus.NewSummaryVec( prometheus.SummaryOpts{ @@ -131,23 +130,16 @@ type Target interface { // Return the target's base labels without job and instance label. That's // useful for display purposes. BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet - // SetBaseLabelsFrom sets the target's base labels to the base labels - // of the provided target. - SetBaseLabelsFrom(Target) - // Scrape target at the specified interval. - RunScraper(storage.SampleAppender, time.Duration) + // Start scraping the target in regular intervals. + RunScraper(storage.SampleAppender) // Stop scraping, synchronous. StopScraper() + // Update the target's state. + Update(config.JobConfig, clientmodel.LabelSet) } // target is a Target that refers to a singular HTTP or HTTPS endpoint. type target struct { - // The current health state of the target. - state TargetState - // The last encountered scrape error, if any. - lastError error - // The last time a scrape was attempted. - lastScrape time.Time // Closing scraperStopping signals that scraping should stop. scraperStopping chan struct{} // Closing scraperStopped signals that scraping has been stopped. @@ -155,34 +147,67 @@ type target struct { // Channel to buffer ingested samples. ingestedSamples chan clientmodel.Samples - url string - // What is the deadline for the HTTP or HTTPS against this endpoint. 
- deadline time.Duration - // Any base labels that are added to this target and its metrics. - baseLabels clientmodel.LabelSet // The HTTP client used to scrape the target's endpoint. httpClient *http.Client - // Mutex protects lastError, lastScrape, state, and baseLabels. + // Mutex protects the members below. sync.RWMutex + + url *url.URL + // Any base labels that are added to this target and its metrics. + baseLabels clientmodel.LabelSet + // The current health state of the target. + state TargetState + // The last encountered scrape error, if any. + lastError error + // The last time a scrape was attempted. + lastScrape time.Time + // What is the deadline for the HTTP or HTTPS against this endpoint. + deadline time.Duration + // The time between two scrapes. + scrapeInterval time.Duration } // NewTarget creates a reasonably configured target for querying. -func NewTarget(url string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target { +func NewTarget(address string, cfg config.JobConfig, baseLabels clientmodel.LabelSet) Target { t := &target{ - url: url, - deadline: deadline, - httpClient: utility.NewDeadlineClient(deadline), + url: &url.URL{ + Host: address, + }, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), } - t.baseLabels = clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier())} - for baseLabel, baseValue := range baseLabels { - t.baseLabels[baseLabel] = baseValue - } + t.Update(cfg, baseLabels) return t } +// Update overwrites settings in the target that are derived from the job config +// it belongs to. +func (t *target) Update(cfg config.JobConfig, baseLabels clientmodel.LabelSet) { + t.Lock() + defer t.Unlock() + + t.url.Scheme = cfg.GetScheme() + t.url.Path = cfg.GetMetricsPath() + + t.scrapeInterval = cfg.ScrapeInterval() + t.deadline = cfg.ScrapeTimeout() + t.httpClient = utility.NewDeadlineClient(cfg.ScrapeTimeout()) + + t.baseLabels = clientmodel.LabelSet{ + clientmodel.InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier()), + } + for name, val := range baseLabels { + t.baseLabels[name] = val + } +} + +func (t *target) String() string { + t.RLock() + defer t.RUnlock() + return t.url.Host +} + // Ingest implements Target and extraction.Ingester. func (t *target) Ingest(s clientmodel.Samples) error { // Since the regular case is that ingestedSamples is ready to receive, @@ -202,10 +227,16 @@ func (t *target) Ingest(s clientmodel.Samples) error { } // RunScraper implements Target. -func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time.Duration) { +func (t *target) RunScraper(sampleAppender storage.SampleAppender) { defer close(t.scraperStopped) - jitterTimer := time.NewTimer(time.Duration(float64(interval) * rand.Float64())) + t.RLock() + lastScrapeInterval := t.scrapeInterval + t.RUnlock() + + glog.V(1).Infof("Starting scraper for target %v...", t) + + jitterTimer := time.NewTimer(time.Duration(float64(lastScrapeInterval) * rand.Float64())) select { case <-jitterTimer.C: case <-t.scraperStopping: @@ -214,7 +245,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time } jitterTimer.Stop() - ticker := time.NewTicker(interval) + ticker := time.NewTicker(lastScrapeInterval) defer ticker.Stop() t.Lock() // Writing t.lastScrape requires the lock. @@ -238,11 +269,21 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time case <-t.scraperStopping: return case <-ticker.C: - t.Lock() // Write t.lastScrape requires locking. 
+ t.Lock() took := time.Since(t.lastScrape) t.lastScrape = time.Now() + + intervalStr := lastScrapeInterval.String() + + // On changed scrape interval the new interval becomes effective + // after the next scrape. + if lastScrapeInterval != t.scrapeInterval { + ticker = time.NewTicker(t.scrapeInterval) + lastScrapeInterval = t.scrapeInterval + } t.Unlock() - targetIntervalLength.WithLabelValues(interval.String()).Observe( + + targetIntervalLength.WithLabelValues(intervalStr).Observe( float64(took) / float64(time.Second), // Sub-second precision. ) t.scrape(sampleAppender) @@ -253,8 +294,12 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time // StopScraper implements Target. func (t *target) StopScraper() { + glog.V(1).Infof("Stopping scraper for target %v...", t) + close(t.scraperStopping) <-t.scraperStopped + + glog.V(1).Infof("Scraper for target %v stopped.", t) } const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1` @@ -277,7 +322,7 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { t.Unlock() }(time.Now()) - req, err := http.NewRequest("GET", t.URL(), nil) + req, err := http.NewRequest("GET", t.url.String(), nil) if err != nil { panic(err) } @@ -339,41 +384,43 @@ func (t *target) LastScrape() time.Time { // URL implements Target. func (t *target) URL() string { - return t.url + t.RLock() + defer t.RUnlock() + return t.url.String() } // InstanceIdentifier implements Target. func (t *target) InstanceIdentifier() string { - u, err := url.Parse(t.url) - if err != nil { - glog.Warningf("Could not parse instance URL when generating identifier, using raw URL: %s", err) - return t.url - } // If we are given a port in the host port, use that. - if strings.Contains(u.Host, ":") { - return u.Host - } - // Otherwise, deduce port based on protocol. - if u.Scheme == "http" { - return fmt.Sprintf("%s:80", u.Host) - } else if u.Scheme == "https" { - return fmt.Sprintf("%s:443", u.Host) + if strings.Contains(t.url.Host, ":") { + return t.url.Host } - glog.Warningf("Unknown scheme %s when generating identifier, using raw URL.", u.Scheme) - return t.url + t.RLock() + defer t.RUnlock() + + // Otherwise, deduce port based on protocol. + if t.url.Scheme == "http" { + return fmt.Sprintf("%s:80", t.url.Host) + } else if t.url.Scheme == "https" { + return fmt.Sprintf("%s:443", t.url.Host) + } + + glog.Warningf("Unknown scheme %s when generating identifier, using host without port number.", t.url.Scheme) + return t.url.Host } // GlobalURL implements Target. 
func (t *target) GlobalURL() string { - url := t.url + url := t.URL() + hostname, err := os.Hostname() if err != nil { glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err) return url } for _, localhostRepresentation := range localhostRepresentations { - url = strings.Replace(url, localhostRepresentation, fmt.Sprintf("http://%s", hostname), -1) + url = strings.Replace(url, "//"+localhostRepresentation, "//"+hostname, 1) } return url } @@ -389,23 +436,13 @@ func (t *target) BaseLabels() clientmodel.LabelSet { func (t *target) BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet { ls := clientmodel.LabelSet{} for ln, lv := range t.BaseLabels() { - if ln != clientmodel.JobLabel && ln != InstanceLabel { + if ln != clientmodel.JobLabel && ln != clientmodel.InstanceLabel { ls[ln] = lv } } return ls } -// SetBaseLabelsFrom implements Target. -func (t *target) SetBaseLabelsFrom(newTarget Target) { - if t.URL() != newTarget.URL() { - panic("targets don't refer to the same endpoint") - } - t.Lock() - defer t.Unlock() - t.baseLabels = newTarget.BaseLabels() -} - func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, healthy bool, scrapeDuration time.Duration) { healthMetric := clientmodel.Metric{} durationMetric := clientmodel.Metric{} diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 427f63036..6ab80050b 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -18,56 +18,46 @@ import ( "fmt" "net/http" "net/http/httptest" + "net/url" "reflect" + "strings" "testing" "time" clientmodel "github.com/prometheus/client_golang/model" + "github.com/golang/protobuf/proto" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/utility" ) +func TestTargetInterface(t *testing.T) { + var _ Target = &target{} +} + func TestBaseLabels(t *testing.T) { - target := NewTarget("http://example.com/metrics", 0, clientmodel.LabelSet{"job": "some_job", "foo": "bar"}) - want := clientmodel.LabelSet{"job": "some_job", "foo": "bar", "instance": "example.com:80"} + target := newTestTarget("example.com", 0, clientmodel.LabelSet{"job": "some_job", "foo": "bar"}) + want := clientmodel.LabelSet{ + clientmodel.JobLabel: "some_job", + clientmodel.InstanceLabel: "example.com:80", + "foo": "bar", + } got := target.BaseLabels() if !reflect.DeepEqual(want, got) { t.Errorf("want base labels %v, got %v", want, got) } - delete(want, "job") - delete(want, "instance") + delete(want, clientmodel.JobLabel) + delete(want, clientmodel.InstanceLabel) + got = target.BaseLabelsWithoutJobAndInstance() if !reflect.DeepEqual(want, got) { t.Errorf("want base labels %v, got %v", want, got) } } -func TestTargetHidesURLAuth(t *testing.T) { - testVectors := []string{"http://secret:data@host.com/query?args#fragment", "https://example.net/foo", "http://foo.com:31337/bar"} - testResults := []string{"host.com:80", "example.net:443", "foo.com:31337"} - if len(testVectors) != len(testResults) { - t.Errorf("Test vector length does not match test result length.") - } - - for i := 0; i < len(testVectors); i++ { - testTarget := target{ - state: Unknown, - url: testVectors[i], - httpClient: utility.NewDeadlineClient(0), - } - u := testTarget.InstanceIdentifier() - if u != testResults[i] { - t.Errorf("Expected InstanceIdentifier to be %v, actual %v", testResults[i], u) - } - } -} - func TestTargetScrapeUpdatesState(t *testing.T) { - testTarget := target{ - state: Unknown, - url: "bad schema", - httpClient: utility.NewDeadlineClient(0), - 
} + testTarget := newTestTarget("bad schema", 0, nil) + testTarget.scrape(nopAppender{}) if testTarget.state != Unhealthy { t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.state) @@ -89,11 +79,7 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { ) defer server.Close() - testTarget := NewTarget( - server.URL, - 10*time.Millisecond, - clientmodel.LabelSet{"dings": "bums"}, - ).(*target) + testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{"dings": "bums"}) testTarget.scrape(slowAppender{}) if testTarget.state != Unhealthy { @@ -105,9 +91,10 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { } func TestTargetRecordScrapeHealth(t *testing.T) { - testTarget := NewTarget( - "http://example.url", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}, - ).(*target) + jcfg := config.JobConfig{} + proto.SetDefaults(&jcfg.JobConfig) + + testTarget := newTestTarget("example.url", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}) now := clientmodel.Now() appender := &collectResultAppender{} @@ -123,7 +110,7 @@ func TestTargetRecordScrapeHealth(t *testing.T) { expected := &clientmodel.Sample{ Metric: clientmodel.Metric{ clientmodel.MetricNameLabel: scrapeHealthMetricName, - InstanceLabel: "example.url:80", + clientmodel.InstanceLabel: "example.url:80", clientmodel.JobLabel: "testjob", }, Timestamp: now, @@ -138,7 +125,7 @@ func TestTargetRecordScrapeHealth(t *testing.T) { expected = &clientmodel.Sample{ Metric: clientmodel.Metric{ clientmodel.MetricNameLabel: scrapeDurationMetricName, - InstanceLabel: "example.url:80", + clientmodel.InstanceLabel: "example.url:80", clientmodel.JobLabel: "testjob", }, Timestamp: now, @@ -163,7 +150,11 @@ func TestTargetScrapeTimeout(t *testing.T) { ) defer server.Close() - testTarget := NewTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) + jcfg := config.JobConfig{} + proto.SetDefaults(&jcfg.JobConfig) + + var testTarget Target = newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) + appender := nopAppender{} // scrape once without timeout @@ -205,25 +196,20 @@ func TestTargetScrape404(t *testing.T) { ) defer server.Close() - testTarget := NewTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) + testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) appender := nopAppender{} want := errors.New("server returned HTTP status 404 Not Found") - got := testTarget.(*target).scrape(appender) + got := testTarget.scrape(appender) if got == nil || want.Error() != got.Error() { t.Fatalf("want err %q, got %q", want, got) } } func TestTargetRunScraperScrapes(t *testing.T) { - testTarget := target{ - state: Unknown, - url: "bad schema", - httpClient: utility.NewDeadlineClient(0), - scraperStopping: make(chan struct{}), - scraperStopped: make(chan struct{}), - } - go testTarget.RunScraper(nopAppender{}, time.Duration(time.Millisecond)) + testTarget := newTestTarget("bad schema", 0, nil) + + go testTarget.RunScraper(nopAppender{}) // Enough time for a scrape to happen. 
 	time.Sleep(2 * time.Millisecond)
@@ -253,11 +239,7 @@ func BenchmarkScrape(b *testing.B) {
 	)
 	defer server.Close()
 
-	testTarget := NewTarget(
-		server.URL,
-		100*time.Millisecond,
-		clientmodel.LabelSet{"dings": "bums"},
-	)
+	var testTarget Target = newTestTarget(server.URL, 100*time.Millisecond, clientmodel.LabelSet{"dings": "bums"})
 	appender := nopAppender{}
 
 	b.ResetTimer()
@@ -267,3 +249,25 @@ func BenchmarkScrape(b *testing.B) {
 		}
 	}
 }
+
+func newTestTarget(targetURL string, deadline time.Duration, baseLabels clientmodel.LabelSet) *target {
+	t := &target{
+		url: &url.URL{
+			Scheme: "http",
+			Host:   strings.TrimPrefix(targetURL, "http://"),
+			Path:   "/metrics",
+		},
+		deadline:        deadline,
+		scrapeInterval:  1 * time.Millisecond,
+		httpClient:      utility.NewDeadlineClient(deadline),
+		scraperStopping: make(chan struct{}),
+		scraperStopped:  make(chan struct{}),
+	}
+	t.baseLabels = clientmodel.LabelSet{
+		clientmodel.InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier()),
+	}
+	for baseLabel, baseValue := range baseLabels {
+		t.baseLabels[baseLabel] = baseValue
+	}
+	return t
+}
diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go
index 32f64947a..2a3e3b325 100644
--- a/retrieval/targetmanager.go
+++ b/retrieval/targetmanager.go
@@ -14,6 +14,8 @@
 package retrieval
 
 import (
+	"fmt"
+	"strings"
 	"sync"
 
 	"github.com/golang/glog"
@@ -21,132 +23,385 @@ import (
 	clientmodel "github.com/prometheus/client_golang/model"
 
 	"github.com/prometheus/prometheus/config"
+	pb "github.com/prometheus/prometheus/config/generated"
+	"github.com/prometheus/prometheus/retrieval/discovery"
 	"github.com/prometheus/prometheus/storage"
 )
 
-// TargetManager manages all scrape targets. All methods are goroutine-safe.
-type TargetManager interface {
-	AddTarget(job config.JobConfig, t Target)
-	ReplaceTargets(job config.JobConfig, newTargets []Target)
-	Remove(t Target)
-	AddTargetsFromConfig(config config.Config)
+// A TargetProvider provides information about target groups. It maintains a set
+// of sources from which TargetGroups can originate. Whenever a target provider
+// detects a potential change it sends the TargetGroup through its provided channel.
+//
+// The TargetProvider does not have to guarantee that an actual change happened.
+// It does guarantee that it sends the new TargetGroup whenever a change happens.
+// On startup it sends all TargetGroups it can see.
+type TargetProvider interface {
+	// Sources returns the source identifiers the provider is currently aware of.
+	Sources() []string
+	// Run hands a channel to the target provider through which it can send
+	// updated target groups. The channel must be closed by the target provider
+	// if no more updates will be sent.
+	Run(chan<- *config.TargetGroup)
+	// Stop terminates any potential computation of the target provider. The
+	// channel received on Run must be closed afterwards.
 	Stop()
-	Pools() map[string]*TargetPool // Returns a copy of the name -> TargetPool mapping.
 }
 
-type targetManager struct {
-	sync.Mutex // Protects poolByJob.
+// TargetManager maintains a set of targets, starts and stops their scraping and
+// creates the new targets based on the target groups it receives from various
+// target providers.
+type TargetManager struct {
+	m              sync.RWMutex
 	globalLabels   clientmodel.LabelSet
 	sampleAppender storage.SampleAppender
-	poolsByJob     map[string]*TargetPool
+	running        bool
+
+	// Targets by their source ID.
+	targets map[string][]Target
+	// Providers and configs by their job name.
+	// TODO(fabxc): turn this into map[*ScrapeConfig][]TargetProvider eventually.
+	providers map[string][]TargetProvider
+	configs   map[string]config.JobConfig
 }
 
-// NewTargetManager returns a newly initialized TargetManager ready to use.
-func NewTargetManager(sampleAppender storage.SampleAppender, globalLabels clientmodel.LabelSet) TargetManager {
-	return &targetManager{
+// NewTargetManager creates a new TargetManager based on the given config.
+func NewTargetManager(cfg config.Config, sampleAppender storage.SampleAppender) (*TargetManager, error) {
+	tm := &TargetManager{
 		sampleAppender: sampleAppender,
-		globalLabels:   globalLabels,
-		poolsByJob:     make(map[string]*TargetPool),
+		targets:        make(map[string][]Target),
 	}
+	if err := tm.applyConfig(cfg); err != nil {
+		return nil, err
+	}
+	return tm, nil
 }
 
-func (m *targetManager) targetPoolForJob(job config.JobConfig) *TargetPool {
-	targetPool, ok := m.poolsByJob[job.GetName()]
+// Run starts background processing to handle target updates.
+func (tm *TargetManager) Run() {
+	glog.Info("Starting target manager...")
 
-	if !ok {
-		var provider TargetProvider
-		if job.SdName != nil {
-			provider = NewSdTargetProvider(job, m.globalLabels)
+	sources := map[string]struct{}{}
+
+	for name, provs := range tm.providers {
+		for _, p := range provs {
+			jcfg := tm.configs[name]
+
+			ch := make(chan *config.TargetGroup)
+			go tm.handleTargetUpdates(tm.configs[name], ch)
+
+			for _, src := range p.Sources() {
+				src = fullSource(jcfg, src)
+				sources[src] = struct{}{}
+			}
+
+			// Run the target provider after cleanup of the stale targets is done.
+			defer func(c chan *config.TargetGroup) {
+				go p.Run(c)
+			}(ch)
 		}
-
-		interval := job.ScrapeInterval()
-		targetPool = NewTargetPool(provider, m.sampleAppender, interval)
-		glog.Infof("Pool for job %s does not exist; creating and starting...", job.GetName())
-
-		m.poolsByJob[job.GetName()] = targetPool
-		go targetPool.Run()
 	}
 
-	return targetPool
-}
-
-func (m *targetManager) AddTarget(job config.JobConfig, t Target) {
-	m.Lock()
-	defer m.Unlock()
-
-	targetPool := m.targetPoolForJob(job)
-	targetPool.AddTarget(t)
-	m.poolsByJob[job.GetName()] = targetPool
-}
-
-func (m *targetManager) ReplaceTargets(job config.JobConfig, newTargets []Target) {
-	m.Lock()
-	defer m.Unlock()
-
-	targetPool := m.targetPoolForJob(job)
-	targetPool.ReplaceTargets(newTargets)
-}
-
-func (m *targetManager) Remove(t Target) {
-	panic("not implemented")
-}
-
-func (m *targetManager) AddTargetsFromConfig(config config.Config) {
-	for _, job := range config.Jobs() {
-		if job.SdName != nil {
-			m.Lock()
-			m.targetPoolForJob(job)
-			m.Unlock()
-			continue
+	tm.removeTargets(func(src string) bool {
+		if _, ok := sources[src]; ok {
+			return false
 		}
+		return true
+	})
 
-		for _, targetGroup := range job.TargetGroup {
-			baseLabels := clientmodel.LabelSet{
-				clientmodel.JobLabel: clientmodel.LabelValue(job.GetName()),
-			}
-			for n, v := range m.globalLabels {
-				baseLabels[n] = v
-			}
-			if targetGroup.Labels != nil {
-				for _, label := range targetGroup.Labels.Label {
-					baseLabels[clientmodel.LabelName(label.GetName())] = clientmodel.LabelValue(label.GetValue())
-				}
-			}
+	tm.running = true
+}
 
-			for _, endpoint := range targetGroup.Target {
-				target := NewTarget(endpoint, job.ScrapeTimeout(), baseLabels)
-				m.AddTarget(job, target)
-			}
+// handleTargetUpdates receives target group updates and handles them in the
+// context of the given job config.
+func (tm *TargetManager) handleTargetUpdates(cfg config.JobConfig, ch <-chan *config.TargetGroup) {
+	for tg := range ch {
+		glog.V(1).Infof("Received potential update for target group %q", tg.Source)
+
+		if err := tm.updateTargetGroup(tg, cfg); err != nil {
+			glog.Errorf("Error updating targets: %s", err)
+		}
+	}
+}
+
+// fullSource prepends the unique job name to the source.
+//
+// Thus, oscillating label sets for targets with the same source,
+// but providers from different configs, are prevented.
+func fullSource(cfg config.JobConfig, src string) string {
+	return cfg.GetName() + ":" + src
+}
+
+// Stop all background processing.
+func (tm *TargetManager) Stop() {
+	tm.stop(true)
+}
+
+// stop background processing of the target manager. If removeTargets is true,
+// existing targets will be stopped and removed.
+func (tm *TargetManager) stop(removeTargets bool) {
+	tm.m.Lock()
+	defer tm.m.Unlock()
+
+	if !tm.running {
+		return
+	}
 	glog.Info("Stopping target manager...")
-	var wg sync.WaitGroup
-	for j, p := range m.poolsByJob {
-		wg.Add(1)
-		go func(j string, p *TargetPool) {
-			defer wg.Done()
-			glog.Infof("Stopping target pool %q...", j)
+	defer glog.Info("Target manager stopped.")
+
+	for _, provs := range tm.providers {
+		for _, p := range provs {
 			p.Stop()
-			glog.Infof("Target pool %q stopped.", j)
-		}(j, p)
+		}
+	}
+
+	if removeTargets {
+		tm.removeTargets(nil)
+	}
+
+	tm.running = false
+}
+
+// removeTargets stops and removes targets for sources where f(source) is true
+// or if f is nil. This method is not thread-safe.
+func (tm *TargetManager) removeTargets(f func(string) bool) {
+	if f == nil {
+		f = func(string) bool { return true }
+	}
 	var wg sync.WaitGroup
+	for src, targets := range tm.targets {
+		if !f(src) {
+			continue
+		}
+		wg.Add(len(targets))
+		for _, target := range targets {
+			go func(t Target) {
+				t.StopScraper()
+				wg.Done()
+			}(target)
+		}
+		delete(tm.targets, src)
 	}
 	wg.Wait()
-	glog.Info("Target manager stopped.")
 }
 
-func (m *targetManager) Pools() map[string]*TargetPool {
-	m.Lock()
-	defer m.Unlock()
-
-	result := make(map[string]*TargetPool, len(m.poolsByJob))
-	for k, v := range m.poolsByJob {
-		result[k] = v
+// updateTargetGroup creates new targets for the group and replaces the old targets
+// for the source ID.
+func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg config.JobConfig) error {
+	newTargets, err := tm.targetsFromGroup(tgroup, cfg)
+	if err != nil {
+		return err
 	}
-	return result
+	src := fullSource(cfg, tgroup.Source)
+
+	tm.m.Lock()
+	defer tm.m.Unlock()
+
+	oldTargets, ok := tm.targets[src]
+	if ok {
+		var wg sync.WaitGroup
+		// Replace the old targets with the new ones while keeping the state
+		// of intersecting targets.
+		for i, tnew := range newTargets {
+			var match Target
+			for j, told := range oldTargets {
+				if told == nil {
+					continue
+				}
+				if tnew.InstanceIdentifier() == told.InstanceIdentifier() {
+					match = told
+					oldTargets[j] = nil
+					break
+				}
+			}
+			// Update the existing target and discard the new equivalent.
+			// Otherwise start scraping the new target.
+			if match != nil {
+				// Updating is blocked during a scrape. We don't want those wait times
+				// to build up.
+				wg.Add(1)
+				go func(t Target) {
+					match.Update(cfg, t.BaseLabels())
+					wg.Done()
+				}(tnew)
+				newTargets[i] = match
+			} else {
+				go tnew.RunScraper(tm.sampleAppender)
+			}
+		}
+		// Remove all old targets that disappeared.
+ for _, told := range oldTargets { + if told != nil { + wg.Add(1) + go func(t Target) { + t.StopScraper() + wg.Done() + }(told) + } + } + wg.Wait() + } else { + // The source ID is new, start all target scrapers. + for _, tnew := range newTargets { + go tnew.RunScraper(tm.sampleAppender) + } + } + + if len(newTargets) > 0 { + tm.targets[src] = newTargets + } else { + delete(tm.targets, src) + } + return nil +} + +// Pools returns the targets currently being scraped bucketed by their job name. +func (tm *TargetManager) Pools() map[string][]Target { + tm.m.RLock() + defer tm.m.RUnlock() + + pools := map[string][]Target{} + + for _, ts := range tm.targets { + for _, t := range ts { + job := string(t.BaseLabels()[clientmodel.JobLabel]) + pools[job] = append(pools[job], t) + } + } + return pools +} + +// ApplyConfig resets the manager's target providers and job configurations as defined +// by the new cfg. The state of targets that are valid in the new configuration remains unchanged. +func (tm *TargetManager) ApplyConfig(cfg config.Config) error { + tm.stop(false) + // Even if updating the config failed, we want to continue rather than stop scraping anything. + defer tm.Run() + + if err := tm.applyConfig(cfg); err != nil { + glog.Warningf("Error updating config, changes not applied: %s", err) + return err + } + return nil +} + +func (tm *TargetManager) applyConfig(cfg config.Config) error { + // Only apply changes if everything was successful. + providers := map[string][]TargetProvider{} + configs := map[string]config.JobConfig{} + + for _, jcfg := range cfg.Jobs() { + provs, err := ProvidersFromConfig(jcfg) + if err != nil { + return err + } + configs[jcfg.GetName()] = jcfg + providers[jcfg.GetName()] = provs + } + tm.m.Lock() + defer tm.m.Unlock() + + tm.globalLabels = cfg.GlobalLabels() + tm.providers = providers + tm.configs = configs + return nil +} + +// targetsFromGroup builds targets based on the given TargetGroup and config. +func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg config.JobConfig) ([]Target, error) { + tm.m.RLock() + defer tm.m.RUnlock() + + targets := make([]Target, 0, len(tg.Targets)) + for i, labels := range tg.Targets { + for ln, lv := range tg.Labels { + if _, ok := labels[ln]; !ok { + labels[ln] = lv + } + } + for ln, lv := range tm.globalLabels { + if _, ok := labels[ln]; !ok { + labels[ln] = lv + } + } + address, ok := labels[clientmodel.AddressLabel] + if !ok { + return nil, fmt.Errorf("Instance %d in target group %s has no address", i, tg) + } + if _, ok := labels[clientmodel.JobLabel]; !ok { + labels[clientmodel.JobLabel] = clientmodel.LabelValue(cfg.GetName()) + } + + for ln := range labels { + // There are currently no internal labels we want to take over to time series. + if strings.HasPrefix(string(ln), clientmodel.ReservedLabelPrefix) { + delete(labels, ln) + } + } + targets = append(targets, NewTarget(string(address), cfg, labels)) + } + return targets, nil +} + +// ProvidersFromConfig returns all TargetProviders configured in cfg. +func ProvidersFromConfig(cfg config.JobConfig) ([]TargetProvider, error) { + var providers []TargetProvider + + if name := cfg.GetSdName(); name != "" { + dnsSD := discovery.NewDNSDiscovery(name, cfg.SDRefreshInterval()) + providers = append(providers, dnsSD) + } + + if tgs := cfg.GetTargetGroup(); tgs != nil { + static := NewStaticProvider(tgs) + providers = append(providers, static) + } + return providers, nil +} + +// StaticProvider holds a list of target groups that never change. 
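Before its definition, a usage sketch for the provider declared next: groups flow from the provider's Run into a channel, and each group's source is namespaced with fullSource, exactly as TargetManager.Run wires it up. Values below are hypothetical, and the usual imports (fmt, proto, and the config/pb packages) are assumed:

jcfg := config.JobConfig{pb.JobConfig{Name: proto.String("myjob")}}
prov := NewStaticProvider([]*pb.TargetGroup{
	{Target: []string{"example.org:80"}},
})
ch := make(chan *config.TargetGroup)
go prov.Run(ch)
for tg := range ch { // Run closes ch, so this loop terminates.
	fmt.Println(fullSource(jcfg, tg.Source)) // prints "myjob:static:0"
}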
+type StaticProvider struct { + TargetGroups []*config.TargetGroup +} + +// NewStaticProvider returns a StaticProvider configured with the given +// target groups. +func NewStaticProvider(groups []*pb.TargetGroup) *StaticProvider { + prov := &StaticProvider{} + + for i, tg := range groups { + g := &config.TargetGroup{ + Source: fmt.Sprintf("static:%d", i), + Labels: clientmodel.LabelSet{}, + } + for _, pair := range tg.GetLabels().GetLabel() { + g.Labels[clientmodel.LabelName(pair.GetName())] = clientmodel.LabelValue(pair.GetValue()) + } + for _, t := range tg.GetTarget() { + g.Targets = append(g.Targets, clientmodel.LabelSet{ + clientmodel.AddressLabel: clientmodel.LabelValue(t), + }) + } + prov.TargetGroups = append(prov.TargetGroups, g) + } + return prov +} + +// Run implements the TargetProvider interface. +func (sd *StaticProvider) Run(ch chan<- *config.TargetGroup) { + for _, tg := range sd.TargetGroups { + ch <- tg + } + close(ch) // This provider never sends any updates. +} + +// Stop implements the TargetProvider interface. +func (sd *StaticProvider) Stop() {} + +// TargetGroups returns the provider's target groups. +func (sd *StaticProvider) Sources() (srcs []string) { + for _, tg := range sd.TargetGroups { + srcs = append(srcs, tg.Source) + } + return srcs } diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 981fa3a3b..ed1b1cd00 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -14,6 +14,7 @@ package retrieval import ( + "reflect" "testing" "time" @@ -21,110 +22,247 @@ import ( clientmodel "github.com/prometheus/client_golang/model" - pb "github.com/prometheus/prometheus/config/generated" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/config" + pb "github.com/prometheus/prometheus/config/generated" ) -type fakeTarget struct { - scrapeCount int - lastScrape time.Time - interval time.Duration -} - -func (t fakeTarget) LastError() error { - return nil -} - -func (t fakeTarget) URL() string { - return "fake" -} - -func (t fakeTarget) InstanceIdentifier() string { - return "fake" -} - -func (t fakeTarget) GlobalURL() string { - return t.URL() -} - -func (t fakeTarget) BaseLabels() clientmodel.LabelSet { - return clientmodel.LabelSet{} -} - -func (t fakeTarget) BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet { - return clientmodel.LabelSet{} -} - -func (t fakeTarget) Interval() time.Duration { - return t.interval -} - -func (t fakeTarget) LastScrape() time.Time { - return t.lastScrape -} - -func (t fakeTarget) scrape(storage.SampleAppender) error { - t.scrapeCount++ - - return nil -} - -func (t fakeTarget) RunScraper(storage.SampleAppender, time.Duration) { - return -} - -func (t fakeTarget) StopScraper() { - return -} - -func (t fakeTarget) State() TargetState { - return Healthy -} - -func (t *fakeTarget) SetBaseLabelsFrom(newTarget Target) {} - -func (t *fakeTarget) Ingest(clientmodel.Samples) error { return nil } - -func testTargetManager(t testing.TB) { - targetManager := NewTargetManager(nopAppender{}, nil) - testJob1 := config.JobConfig{ - JobConfig: pb.JobConfig{ - Name: proto.String("test_job1"), - ScrapeInterval: proto.String("1m"), +func TestTargetManagerChan(t *testing.T) { + testJob1 := pb.JobConfig{ + Name: proto.String("test_job1"), + ScrapeInterval: proto.String("1m"), + TargetGroup: []*pb.TargetGroup{ + {Target: []string{"example.org:80", "example.com:80"}}, }, } - testJob2 := config.JobConfig{ - JobConfig: pb.JobConfig{ - Name: proto.String("test_job2"), - 
ScrapeInterval: proto.String("1m"), + prov1 := &fakeTargetProvider{ + sources: []string{"src1", "src2"}, + update: make(chan *config.TargetGroup), + } + + targetManager := &TargetManager{ + sampleAppender: nopAppender{}, + providers: map[string][]TargetProvider{ + *testJob1.Name: []TargetProvider{prov1}, + }, + configs: map[string]config.JobConfig{ + *testJob1.Name: config.JobConfig{testJob1}, + }, + targets: make(map[string][]Target), + } + go targetManager.Run() + defer targetManager.Stop() + + sequence := []struct { + tgroup *config.TargetGroup + expected map[string][]clientmodel.LabelSet + }{ + { + tgroup: &config.TargetGroup{ + Source: "src1", + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "test-1:1234"}, + {clientmodel.AddressLabel: "test-2:1234", "label": "set"}, + {clientmodel.AddressLabel: "test-3:1234"}, + }, + }, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:src1": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-1:1234"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-2:1234", "label": "set"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-3:1234"}, + }, + }, + }, { + tgroup: &config.TargetGroup{ + Source: "src2", + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "test-1:1235"}, + {clientmodel.AddressLabel: "test-2:1235"}, + {clientmodel.AddressLabel: "test-3:1235"}, + }, + Labels: clientmodel.LabelSet{"group": "label"}, + }, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:src1": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-1:1234"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-2:1234", "label": "set"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-3:1234"}, + }, + "test_job1:src2": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-1:1235", "group": "label"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-2:1235", "group": "label"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-3:1235", "group": "label"}, + }, + }, + }, { + tgroup: &config.TargetGroup{ + Source: "src2", + Targets: []clientmodel.LabelSet{}, + }, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:src1": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-1:1234"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-2:1234", "label": "set"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-3:1234"}, + }, + }, + }, { + tgroup: &config.TargetGroup{ + Source: "src1", + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "test-1:1234", "added": "label"}, + {clientmodel.AddressLabel: "test-3:1234"}, + {clientmodel.AddressLabel: "test-4:1234", "fancy": "label"}, + }, + }, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:src1": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-1:1234", "added": "label"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-3:1234"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "test-4:1234", "fancy": "label"}, + }, + }, }, } - target1GroupA := &fakeTarget{ - interval: time.Minute, - } - target2GroupA := &fakeTarget{ - interval: time.Minute, - } + for i, step := range sequence { + prov1.update <- step.tgroup - targetManager.AddTarget(testJob1, target1GroupA) - targetManager.AddTarget(testJob1, target2GroupA) + <-time.After(1 * time.Millisecond) - target1GroupB := 
&fakeTarget{ - interval: time.Minute * 2, - } + if len(targetManager.targets) != len(step.expected) { + t.Fatalf("step %d: sources mismatch %v, %v", targetManager.targets, step.expected) + } - targetManager.AddTarget(testJob2, target1GroupB) -} - -func TestTargetManager(t *testing.T) { - testTargetManager(t) -} - -func BenchmarkTargetManager(b *testing.B) { - for i := 0; i < b.N; i++ { - testTargetManager(b) + for source, actTargets := range targetManager.targets { + expTargets, ok := step.expected[source] + if !ok { + t.Fatalf("step %d: unexpected source %q: %v", i, source, actTargets) + } + for _, expt := range expTargets { + found := false + for _, actt := range actTargets { + if reflect.DeepEqual(expt, actt.BaseLabels()) { + found = true + break + } + } + if !found { + t.Errorf("step %d: expected target %v not found in actual targets", i, expt) + } + } + } + } +} + +func TestTargetManagerConfigUpdate(t *testing.T) { + testJob1 := &pb.JobConfig{ + Name: proto.String("test_job1"), + ScrapeInterval: proto.String("1m"), + TargetGroup: []*pb.TargetGroup{ + {Target: []string{"example.org:80", "example.com:80"}}, + }, + } + testJob2 := &pb.JobConfig{ + Name: proto.String("test_job2"), + ScrapeInterval: proto.String("1m"), + TargetGroup: []*pb.TargetGroup{ + {Target: []string{"example.org:8080", "example.com:8081"}}, + {Target: []string{"test.com:1234"}}, + }, + } + + sequence := []struct { + jobConfigs []*pb.JobConfig + expected map[string][]clientmodel.LabelSet + }{ + { + jobConfigs: []*pb.JobConfig{testJob1}, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:static:0": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.com:80"}, + }, + }, + }, { + jobConfigs: []*pb.JobConfig{testJob1}, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:static:0": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.com:80"}, + }, + }, + }, { + jobConfigs: []*pb.JobConfig{testJob1, testJob2}, + expected: map[string][]clientmodel.LabelSet{ + "test_job1:static:0": { + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, + {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.com:80"}, + }, + "test_job2:static:0": { + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080"}, + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081"}, + }, + "test_job2:static:1": { + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "test.com:1234"}, + }, + }, + }, { + jobConfigs: []*pb.JobConfig{}, + expected: map[string][]clientmodel.LabelSet{}, + }, { + jobConfigs: []*pb.JobConfig{testJob2}, + expected: map[string][]clientmodel.LabelSet{ + "test_job2:static:0": { + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080"}, + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081"}, + }, + "test_job2:static:1": { + {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "test.com:1234"}, + }, + }, + }, + } + + targetManager, err := NewTargetManager(config.Config{}, nopAppender{}) + if err != nil { + t.Fatal(err) + } + targetManager.Run() + defer targetManager.Stop() + + for i, step := range sequence { + cfg := pb.PrometheusConfig{ + Job: step.jobConfigs, + } + err := targetManager.ApplyConfig(config.Config{cfg}) + if err != nil { 
+ t.Fatal(err) + } + + <-time.After(1 * time.Millisecond) + + if len(targetManager.targets) != len(step.expected) { + t.Fatalf("step %d: sources mismatch %v, %v", targetManager.targets, step.expected) + } + + for source, actTargets := range targetManager.targets { + expTargets, ok := step.expected[source] + if !ok { + t.Fatalf("step %d: unexpected source %q: %v", i, source, actTargets) + } + for _, expt := range expTargets { + found := false + for _, actt := range actTargets { + if reflect.DeepEqual(expt, actt.BaseLabels()) { + found = true + break + } + } + if !found { + t.Errorf("step %d: expected target %v for %q not found in actual targets", i, expt, source) + } + } + } } } diff --git a/retrieval/targetpool.go b/retrieval/targetpool.go deleted file mode 100644 index 9b1b005b6..000000000 --- a/retrieval/targetpool.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package retrieval - -import ( - "sort" - "sync" - "time" - - "github.com/golang/glog" - - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/utility" -) - -const ( - targetAddQueueSize = 100 - targetReplaceQueueSize = 1 -) - -// TargetPool is a pool of targets for the same job. -type TargetPool struct { - sync.RWMutex - - manager TargetManager - targetsByURL map[string]Target - interval time.Duration - sampleAppender storage.SampleAppender - addTargetQueue chan Target - - targetProvider TargetProvider - - stopping, stopped chan struct{} -} - -// NewTargetPool creates a TargetPool, ready to be started by calling Run. -func NewTargetPool(p TargetProvider, app storage.SampleAppender, i time.Duration) *TargetPool { - return &TargetPool{ - interval: i, - sampleAppender: app, - targetsByURL: make(map[string]Target), - addTargetQueue: make(chan Target, targetAddQueueSize), - targetProvider: p, - stopping: make(chan struct{}), - stopped: make(chan struct{}), - } -} - -// Run starts the target pool. It returns when the target pool has stopped -// (after calling Stop). Run is usually called as a goroutine. -func (p *TargetPool) Run() { - ticker := time.NewTicker(p.interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if p.targetProvider != nil { - targets, err := p.targetProvider.Targets() - if err != nil { - glog.Warningf("Error looking up targets, keeping old list: %s", err) - } else { - p.ReplaceTargets(targets) - } - } - case newTarget := <-p.addTargetQueue: - p.addTarget(newTarget) - case <-p.stopping: - p.ReplaceTargets([]Target{}) - close(p.stopped) - return - } - } -} - -// Stop stops the target pool and returns once the shutdown is complete. -func (p *TargetPool) Stop() { - close(p.stopping) - <-p.stopped -} - -// AddTarget adds a target by queuing it in the target queue. 
-func (p *TargetPool) AddTarget(target Target) { - p.addTargetQueue <- target -} - -func (p *TargetPool) addTarget(target Target) { - p.Lock() - defer p.Unlock() - - p.targetsByURL[target.URL()] = target - go target.RunScraper(p.sampleAppender, p.interval) -} - -// ReplaceTargets replaces the old targets by the provided new ones but reuses -// old targets that are also present in newTargets to preserve scheduling and -// health state. Targets no longer present are stopped. -func (p *TargetPool) ReplaceTargets(newTargets []Target) { - p.Lock() - defer p.Unlock() - - newTargetURLs := make(utility.Set) - for _, newTarget := range newTargets { - newTargetURLs.Add(newTarget.URL()) - oldTarget, ok := p.targetsByURL[newTarget.URL()] - if ok { - oldTarget.SetBaseLabelsFrom(newTarget) - } else { - p.targetsByURL[newTarget.URL()] = newTarget - go newTarget.RunScraper(p.sampleAppender, p.interval) - } - } - - var wg sync.WaitGroup - for k, oldTarget := range p.targetsByURL { - if !newTargetURLs.Has(k) { - wg.Add(1) - go func(k string, oldTarget Target) { - defer wg.Done() - glog.V(1).Infof("Stopping scraper for target %s...", k) - oldTarget.StopScraper() - glog.V(1).Infof("Scraper for target %s stopped.", k) - }(k, oldTarget) - delete(p.targetsByURL, k) - } - } - wg.Wait() -} - -type targetsByURL []Target - -func (s targetsByURL) Len() int { - return len(s) -} -func (s targetsByURL) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s targetsByURL) Less(i, j int) bool { - return s[i].URL() < s[j].URL() -} - -// Targets returns a sorted copy of the current target list. -func (p *TargetPool) Targets() []Target { - p.RLock() - defer p.RUnlock() - - targets := make(targetsByURL, 0, len(p.targetsByURL)) - for _, v := range p.targetsByURL { - targets = append(targets, v) - } - sort.Sort(targets) - return targets -} diff --git a/retrieval/targetpool_test.go b/retrieval/targetpool_test.go deleted file mode 100644 index 56e9ea09b..000000000 --- a/retrieval/targetpool_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package retrieval - -import ( - "net/http" - "testing" - "time" -) - -func testTargetPool(t testing.TB) { - type expectation struct { - size int - } - - type input struct { - url string - scheduledFor time.Time - } - - type output struct { - url string - } - - var scenarios = []struct { - name string - inputs []input - outputs []output - }{ - { - name: "empty", - inputs: []input{}, - outputs: []output{}, - }, - { - name: "single element", - inputs: []input{ - { - url: "single1", - }, - }, - outputs: []output{ - { - url: "single1", - }, - }, - }, - { - name: "plural schedules", - inputs: []input{ - { - url: "plural1", - }, - { - url: "plural2", - }, - }, - outputs: []output{ - { - url: "plural1", - }, - { - url: "plural2", - }, - }, - }, - } - - for i, scenario := range scenarios { - pool := NewTargetPool(nil, nopAppender{}, time.Duration(1)) - - for _, input := range scenario.inputs { - target := target{ - url: input.url, - httpClient: &http.Client{}, - } - pool.addTarget(&target) - } - - if len(pool.targetsByURL) != len(scenario.outputs) { - t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL)) - } else { - for j, output := range scenario.outputs { - if target, ok := pool.targetsByURL[output.url]; !ok { - t.Errorf("%s %d.%d. expected Target url to be %s but was %s", scenario.name, i, j, output.url, target.URL()) - } - } - - if len(pool.targetsByURL) != len(scenario.outputs) { - t.Errorf("%s %d. expected to repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByURL)) - } - } - } -} - -func TestTargetPool(t *testing.T) { - testTargetPool(t) -} - -func TestTargetPoolReplaceTargets(t *testing.T) { - pool := NewTargetPool(nil, nopAppender{}, time.Duration(1)) - oldTarget1 := &target{ - url: "example1", - state: Unhealthy, - scraperStopping: make(chan struct{}), - scraperStopped: make(chan struct{}), - httpClient: &http.Client{}, - } - oldTarget2 := &target{ - url: "example2", - state: Unhealthy, - scraperStopping: make(chan struct{}), - scraperStopped: make(chan struct{}), - httpClient: &http.Client{}, - } - newTarget1 := &target{ - url: "example1", - state: Healthy, - scraperStopping: make(chan struct{}), - scraperStopped: make(chan struct{}), - httpClient: &http.Client{}, - } - newTarget2 := &target{ - url: "example3", - state: Healthy, - scraperStopping: make(chan struct{}), - scraperStopped: make(chan struct{}), - httpClient: &http.Client{}, - } - - pool.addTarget(oldTarget1) - pool.addTarget(oldTarget2) - - pool.ReplaceTargets([]Target{newTarget1, newTarget2}) - - if len(pool.targetsByURL) != 2 { - t.Errorf("Expected 2 elements in pool, had %d", len(pool.targetsByURL)) - } - - if pool.targetsByURL["example1"].State() != oldTarget1.State() { - t.Errorf("target1 channel has changed") - } - if pool.targetsByURL["example3"].State() == oldTarget2.State() { - t.Errorf("newTarget2 channel same as oldTarget2's") - } - -} - -func BenchmarkTargetPool(b *testing.B) { - for i := 0; i < b.N; i++ { - testTargetPool(b) - } -} diff --git a/web/status.go b/web/status.go index c641c9aac..9de2fab98 100644 --- a/web/status.go +++ b/web/status.go @@ -30,7 +30,7 @@ type PrometheusStatusHandler struct { Config string Flags map[string]string RuleManager manager.RuleManager - TargetPools map[string]*retrieval.TargetPool + TargetPools func() map[string][]retrieval.Target Birth time.Time PathPrefix string diff --git a/web/templates/status.html b/web/templates/status.html index 
91332c601..c6e2de472 100644 --- a/web/templates/status.html +++ b/web/templates/status.html @@ -33,7 +33,7 @@

Targets

{{$stateToClass := .TargetStateToClass}} - {{range $job, $pool := .TargetPools}} + {{range $job, $pool := call .TargetPools}} @@ -45,7 +45,7 @@ - {{range $pool.Targets}} + {{range $pool}} From 1a2d57b45ca6986c5c298407731044c356c91115 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Mon, 18 May 2015 12:16:25 +0200 Subject: [PATCH 17/27] Move template functionality out of target. The target implementation and interface contain methods only serving a specific purpose of the templates. They were moved to the template as they operate on more fundamental target data. --- retrieval/target.go | 73 +++++++++++--------------------------- retrieval/target_test.go | 7 ---- retrieval/targetmanager.go | 2 +- web/templates/status.html | 4 +-- web/web.go | 43 ++++++++++++++++++---- 5 files changed, 59 insertions(+), 70 deletions(-) diff --git a/retrieval/target.go b/retrieval/target.go index 8ad2d8f0c..aae723789 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -19,7 +19,6 @@ import ( "math/rand" "net/http" "net/url" - "os" "strings" "sync" "time" @@ -53,8 +52,6 @@ const ( var ( errIngestChannelFull = errors.New("ingestion channel full") - localhostRepresentations = []string{"127.0.0.1", "localhost"} - targetIntervalLength = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Namespace: namespace, @@ -118,17 +115,11 @@ type Target interface { URL() string // Used to populate the `instance` label in metrics. InstanceIdentifier() string - // The URL as seen from other hosts. References to localhost are resolved - // to the address of the prometheus server. - GlobalURL() string // Return the labels describing the targets. These are the base labels // as well as internal labels. - Labels() clientmodel.LabelSet + fullLabels() clientmodel.LabelSet // Return the target's base labels. BaseLabels() clientmodel.LabelSet - // Return the target's base labels without job and instance label. That's - // useful for display purposes. - BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet // Start scraping the target in regular intervals. RunScraper(storage.SampleAppender) // Stop scraping, synchronous. @@ -195,7 +186,6 @@ type target struct { // The status object for the target. It is only set once on initialization. status *TargetStatus - // The HTTP client used to scrape the target's endpoint. httpClient *http.Client @@ -419,66 +409,43 @@ func (t *target) InstanceIdentifier() string { return t.url.Host } -// GlobalURL implements Target. -func (t *target) GlobalURL() string { - url := t.URL() - - hostname, err := os.Hostname() - if err != nil { - glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err) - return url - } - for _, localhostRepresentation := range localhostRepresentations { - url = strings.Replace(url, "//"+localhostRepresentation, "//"+hostname, 1) - } - return url -} - -// Labels implements Target. -func (t *target) Labels() clientmodel.LabelSet { +// fullLabels implements Target. +func (t *target) fullLabels() clientmodel.LabelSet { t.RLock() defer t.RUnlock() - ls := clientmodel.LabelSet{} + lset := make(clientmodel.LabelSet, len(t.baseLabels)+2) for ln, lv := range t.baseLabels { - ls[ln] = lv + lset[ln] = lv } - ls[clientmodel.MetricsPathLabel] = clientmodel.LabelValue(t.url.Path) - ls[clientmodel.AddressLabel] = clientmodel.LabelValue(t.url.Host) - return ls + lset[clientmodel.MetricsPathLabel] = clientmodel.LabelValue(t.url.Path) + lset[clientmodel.AddressLabel] = clientmodel.LabelValue(t.url.Host) + return lset } // BaseLabels implements Target. 
func (t *target) BaseLabels() clientmodel.LabelSet { t.RLock() defer t.RUnlock() - return t.baseLabels -} - -// BaseLabelsWithoutJobAndInstance implements Target. -// -// TODO(fabxc): This method does not have to be part of the interface. Implement this -// as a template filter func for the single use case. -func (t *target) BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet { - t.RLock() - defer t.RUnlock() - ls := clientmodel.LabelSet{} + lset := make(clientmodel.LabelSet, len(t.baseLabels)) for ln, lv := range t.baseLabels { - if ln != clientmodel.JobLabel && ln != clientmodel.InstanceLabel { - ls[ln] = lv - } + lset[ln] = lv } - return ls + return lset } func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, scrapeDuration time.Duration) { - healthMetric := clientmodel.Metric{} - durationMetric := clientmodel.Metric{} - for label, value := range t.BaseLabels() { + t.RLock() + healthMetric := make(clientmodel.Metric, len(t.baseLabels)+1) + durationMetric := make(clientmodel.Metric, len(t.baseLabels)+1) + + healthMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeHealthMetricName) + durationMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeDurationMetricName) + + for label, value := range t.baseLabels { healthMetric[label] = value durationMetric[label] = value } - healthMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeHealthMetricName) - durationMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeDurationMetricName) + t.RUnlock() healthValue := clientmodel.SampleValue(0) if t.status.State() == Healthy { diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 7d2c1327d..a70837791 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -44,13 +44,6 @@ func TestBaseLabels(t *testing.T) { if !reflect.DeepEqual(want, got) { t.Errorf("want base labels %v, got %v", want, got) } - delete(want, clientmodel.JobLabel) - delete(want, clientmodel.InstanceLabel) - - got = target.BaseLabelsWithoutJobAndInstance() - if !reflect.DeepEqual(want, got) { - t.Errorf("want base labels %v, got %v", want, got) - } } func TestTargetScrapeUpdatesState(t *testing.T) { diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index cacc2126e..8b71790bc 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -215,7 +215,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf // to build up. wg.Add(1) go func(t Target) { - match.Update(cfg, t.Labels()) + match.Update(cfg, t.fullLabels()) wg.Done() }(tnew) newTargets[i] = match diff --git a/web/templates/status.html b/web/templates/status.html index 900470978..ba37f458b 100644 --- a/web/templates/status.html +++ b/web/templates/status.html @@ -48,7 +48,7 @@ {{range $pool}}
{{$job}}
{{.URL}} From 0b619b46d678f586f84b89cdd95e1fa10b024adf Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Sat, 25 Apr 2015 12:59:05 +0200 Subject: [PATCH 03/27] Change JobConfig to ScrapeConfig. This commit changes the configuration interface from job configs to scrape configs. This includes allowing multiple ways of target definition at once and moving DNS SD to its own config message. DNS SD can now contain multiple DNS names per configured discovery. --- .../client_golang/model/labelname.go | 4 + config/config.go | 158 +++++++++++------- config/config.proto | 32 ++-- config/config_test.go | 7 +- config/fixtures/invalid_job_name.conf.input | 4 +- config/fixtures/minimal.conf.input | 4 +- .../mixing_sd_and_manual_targets.conf.input | 7 - config/fixtures/repeated_job_name.conf.input | 12 +- config/fixtures/sample.conf.input | 8 +- config/fixtures/sd_targets.conf.input | 8 +- config/generated/config.pb.go | 112 +++++++------ config/load.go | 6 +- retrieval/discovery/dns.go | 44 +++-- retrieval/target.go | 37 +++- retrieval/target_test.go | 8 +- retrieval/targetmanager.go | 81 ++++----- retrieval/targetmanager_test.go | 39 ++--- 17 files changed, 328 insertions(+), 243 deletions(-) delete mode 100644 config/fixtures/mixing_sd_and_manual_targets.conf.input diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go index cebc14de3..5ea4258aa 100644 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go @@ -30,6 +30,10 @@ const ( // a scrape target. AddressLabel LabelName = "__address__" + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel LabelName = "__metrics_path__" + // ReservedLabelPrefix is a prefix which is not legal in user-supplied // label names. ReservedLabelPrefix = "__" diff --git a/config/config.go b/config/config.go index 1e582e36d..844bd79e2 100644 --- a/config/config.go +++ b/config/config.go @@ -39,12 +39,12 @@ type Config struct { } // String returns an ASCII serialization of the loaded configuration protobuf. -func (c Config) String() string { +func (c *Config) String() string { return proto.MarshalTextString(&c.PrometheusConfig) } // validateLabels validates whether label names have the correct format. -func (c Config) validateLabels(labels *pb.LabelPairs) error { +func validateLabels(labels *pb.LabelPairs) error { if labels == nil { return nil } @@ -57,7 +57,7 @@ func (c Config) validateLabels(labels *pb.LabelPairs) error { } // validateHosts validates whether a target group contains valid hosts. -func (c Config) validateHosts(hosts []string) error { +func validateHosts(hosts []string) error { if hosts == nil { return nil } @@ -72,7 +72,7 @@ func (c Config) validateHosts(hosts []string) error { } // Validate checks an entire parsed Config for the validity of its fields. -func (c Config) Validate() error { +func (c *Config) Validate() error { // Check the global configuration section for validity. 
global := c.Global if _, err := utility.StringToDuration(global.GetScrapeInterval()); err != nil { @@ -81,58 +81,30 @@ func (c Config) Validate() error { if _, err := utility.StringToDuration(global.GetEvaluationInterval()); err != nil { return fmt.Errorf("invalid rule evaluation interval: %s", err) } - if err := c.validateLabels(global.Labels); err != nil { + if err := validateLabels(global.Labels); err != nil { return fmt.Errorf("invalid global labels: %s", err) } - // Check each job configuration for validity. - jobNames := map[string]bool{} - for _, job := range c.Job { - if jobNames[job.GetName()] { - return fmt.Errorf("found multiple jobs configured with the same name: '%s'", job.GetName()) - } - jobNames[job.GetName()] = true + // Check each scrape configuration for validity. + jobNames := map[string]struct{}{} + for _, sc := range c.ScrapeConfigs() { + name := sc.GetJobName() - if !jobNameRE.MatchString(job.GetName()) { - return fmt.Errorf("invalid job name '%s'", job.GetName()) + if _, ok := jobNames[name]; ok { + return fmt.Errorf("found multiple scrape configs configured with the same job name: %q", name) } - if _, err := utility.StringToDuration(job.GetScrapeInterval()); err != nil { - return fmt.Errorf("invalid scrape interval for job '%s': %s", job.GetName(), err) - } - if _, err := utility.StringToDuration(job.GetSdRefreshInterval()); err != nil { - return fmt.Errorf("invalid SD refresh interval for job '%s': %s", job.GetName(), err) - } - if _, err := utility.StringToDuration(job.GetScrapeTimeout()); err != nil { - return fmt.Errorf("invalid scrape timeout for job '%s': %s", job.GetName(), err) - } - for _, targetGroup := range job.TargetGroup { - if err := c.validateLabels(targetGroup.Labels); err != nil { - return fmt.Errorf("invalid labels for job '%s': %s", job.GetName(), err) - } - if err := c.validateHosts(targetGroup.Target); err != nil { - return fmt.Errorf("invalid targets for job '%s': %s", job.GetName(), err) - } - } - if job.SdName != nil && len(job.TargetGroup) > 0 { - return fmt.Errorf("specified both DNS-SD name and target group for job: %s", job.GetName()) + jobNames[name] = struct{}{} + + if err := sc.Validate(); err != nil { + return fmt.Errorf("error in scrape config %q: %s", name, err) } } return nil } -// GetJobByName finds a job by its name in a Config object. -func (c Config) GetJobByName(name string) *JobConfig { - for _, job := range c.Job { - if job.GetName() == name { - return &JobConfig{*job} - } - } - return nil -} - // GlobalLabels returns the global labels as a LabelSet. -func (c Config) GlobalLabels() clientmodel.LabelSet { +func (c *Config) GlobalLabels() clientmodel.LabelSet { labels := clientmodel.LabelSet{} if c.Global != nil && c.Global.Labels != nil { for _, label := range c.Global.Labels.Label { @@ -142,10 +114,10 @@ func (c Config) GlobalLabels() clientmodel.LabelSet { return labels } -// Jobs returns all the jobs in a Config object. -func (c Config) Jobs() (jobs []JobConfig) { - for _, job := range c.Job { - jobs = append(jobs, JobConfig{*job}) +// ScrapeConfigs returns all scrape configurations. +func (c *Config) ScrapeConfigs() (cfgs []*ScrapeConfig) { + for _, sc := range c.GetScrapeConfig() { + cfgs = append(cfgs, &ScrapeConfig{*sc}) } return } @@ -160,36 +132,96 @@ func stringToDuration(intervalStr string) time.Duration { } // ScrapeInterval gets the default scrape interval for a Config. 
-func (c Config) ScrapeInterval() time.Duration {
+func (c *Config) ScrapeInterval() time.Duration {
 return stringToDuration(c.Global.GetScrapeInterval())
}

// EvaluationInterval gets the default evaluation interval for a Config.
-func (c Config) EvaluationInterval() time.Duration {
+func (c *Config) EvaluationInterval() time.Duration {
 return stringToDuration(c.Global.GetEvaluationInterval())
}

-// JobConfig encapsulates the configuration of a single job. It wraps the raw
-// job protocol buffer to be able to add custom methods to it.
-type JobConfig struct {
- pb.JobConfig
+// ScrapeConfig encapsulates a protobuf scrape configuration.
+type ScrapeConfig struct {
+ pb.ScrapeConfig
}

-// SDRefreshInterval gets the the SD refresh interval for a job.
-func (c JobConfig) SDRefreshInterval() time.Duration {
- return stringToDuration(c.GetSdRefreshInterval())
-}
-
-// ScrapeInterval gets the scrape interval for a job.
-func (c JobConfig) ScrapeInterval() time.Duration {
+// ScrapeInterval gets the scrape interval for the scrape config.
+func (c *ScrapeConfig) ScrapeInterval() time.Duration {
 return stringToDuration(c.GetScrapeInterval())
}

-// ScrapeTimeout gets the scrape timeout for a job.
-func (c JobConfig) ScrapeTimeout() time.Duration {
+// ScrapeTimeout gets the scrape timeout for the scrape config.
+func (c *ScrapeConfig) ScrapeTimeout() time.Duration {
 return stringToDuration(c.GetScrapeTimeout())
}

+// Labels returns a label set for the targets that is implied by the scrape config.
+func (c *ScrapeConfig) Labels() clientmodel.LabelSet {
+ return clientmodel.LabelSet{
+ clientmodel.MetricsPathLabel: clientmodel.LabelValue(c.GetMetricsPath()),
+ clientmodel.JobLabel: clientmodel.LabelValue(c.GetJobName()),
+ }
+}
+
+// Validate checks the ScrapeConfig for the validity of its fields.
+func (c *ScrapeConfig) Validate() error {
+ name := c.GetJobName()
+
+ if !jobNameRE.MatchString(name) {
+ return fmt.Errorf("invalid job name %q", name)
+ }
+ if _, err := utility.StringToDuration(c.GetScrapeInterval()); err != nil {
+ return fmt.Errorf("invalid scrape interval: %s", err)
+ }
+ if _, err := utility.StringToDuration(c.GetScrapeTimeout()); err != nil {
+ return fmt.Errorf("invalid scrape timeout: %s", err)
+ }
+ for _, tgroup := range c.GetTargetGroup() {
+ if err := validateLabels(tgroup.Labels); err != nil {
+ return fmt.Errorf("invalid labels: %s", err)
+ }
+ if err := validateHosts(tgroup.Target); err != nil {
+ return fmt.Errorf("invalid targets: %s", err)
+ }
+ }
+ for _, dnscfg := range c.DNSConfigs() {
+ if err := dnscfg.Validate(); err != nil {
+ return fmt.Errorf("invalid DNS config: %s", err)
+ }
+ }
+ return nil
+}
+
+// DNSConfigs returns the list of DNS service discovery configurations
+// for the scrape config.
+func (c *ScrapeConfig) DNSConfigs() []*DNSConfig {
+ var dnscfgs []*DNSConfig
+ for _, dc := range c.GetDnsConfig() {
+ dnscfgs = append(dnscfgs, &DNSConfig{*dc})
+ }
+ return dnscfgs
+}
+
+// DNSConfig encapsulates the protobuf configuration object for DNS based
+// service discovery.
+type DNSConfig struct {
+ pb.DNSConfig
+}
+
+// Validate checks the DNSConfig for the validity of its fields.
+func (c *DNSConfig) Validate() error {
+ if _, err := utility.StringToDuration(c.GetRefreshInterval()); err != nil {
+ return fmt.Errorf("invalid refresh interval: %s", err)
+ }
+ return nil
+}
+
+// RefreshInterval gets the refresh interval for DNS based service discovery.
+func (c *DNSConfig) RefreshInterval() time.Duration { + return stringToDuration(c.GetRefreshInterval()) +} + // TargetGroup is derived from a protobuf TargetGroup and attaches a source to it // that identifies the origin of the group. type TargetGroup struct { diff --git a/config/config.proto b/config/config.proto index d2f741491..3de910005 100644 --- a/config/config.proto +++ b/config/config.proto @@ -48,12 +48,22 @@ message TargetGroup { optional LabelPairs labels = 2; } +// The configuration for DNS based service discovery. +message DNSConfig { + // The list of DNS-SD service names pointing to SRV records + // containing endpoint information. + repeated string name = 1; + // Discovery refresh period when using DNS-SD to discover targets. Must be a + // valid Prometheus duration string in the form "[0-9]+[smhdwy]". + optional string refresh_interval = 2 [default = "30s"]; +} + // The configuration for a Prometheus job to scrape. // -// The next field no. is 8. -message JobConfig { +// The next field no. is 10. +message ScrapeConfig { // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". - required string name = 1; + required string job_name = 1; // How frequently to scrape targets from this job. Overrides the global // default. Must be a valid Prometheus duration string in the form // "[0-9]+[smhdwy]". @@ -61,15 +71,9 @@ message JobConfig { // Per-target timeout when scraping this job. Must be a valid Prometheus // duration string in the form "[0-9]+[smhdwy]". optional string scrape_timeout = 7 [default = "10s"]; - // The DNS-SD service name pointing to SRV records containing endpoint - // information for a job. When this field is provided, no target_group - // elements may be set. - optional string sd_name = 3; - // Discovery refresh period when using DNS-SD to discover targets. Must be a - // valid Prometheus duration string in the form "[0-9]+[smhdwy]". - optional string sd_refresh_interval = 4 [default = "30s"]; - // List of labeled target groups for this job. Only legal when DNS-SD isn't - // used for a job. + // List of DNS service discovery configurations. + repeated DNSConfig dns_config = 9; + // List of labeled target groups for this job. repeated TargetGroup target_group = 5; // The HTTP resource path on which to fetch metrics from targets. optional string metrics_path = 6 [default = "/metrics"]; @@ -83,6 +87,6 @@ message PrometheusConfig { // configuration with default values (see GlobalConfig definition) will be // created. optional GlobalConfig global = 1; - // The list of jobs to scrape. - repeated JobConfig job = 2; + // The list of scrape configs. 
+ repeated ScrapeConfig scrape_config = 3; } diff --git a/config/config_test.go b/config/config_test.go index acc94b50d..9955dc99f 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -55,15 +55,10 @@ var configTests = []struct { shouldFail: true, errContains: "invalid label name", }, - { - inputFile: "mixing_sd_and_manual_targets.conf.input", - shouldFail: true, - errContains: "specified both DNS-SD name and target group", - }, { inputFile: "repeated_job_name.conf.input", shouldFail: true, - errContains: "found multiple jobs configured with the same name: 'testjob1'", + errContains: "found multiple scrape configs configured with the same job name: \"testjob1\"", }, } diff --git a/config/fixtures/invalid_job_name.conf.input b/config/fixtures/invalid_job_name.conf.input index e3923ac72..dcebbccb4 100644 --- a/config/fixtures/invalid_job_name.conf.input +++ b/config/fixtures/invalid_job_name.conf.input @@ -1,3 +1,3 @@ -job: < - name: "1testjob" +scrape_config: < + job_name: "1testjob" > diff --git a/config/fixtures/minimal.conf.input b/config/fixtures/minimal.conf.input index 637d95e50..9a436411f 100644 --- a/config/fixtures/minimal.conf.input +++ b/config/fixtures/minimal.conf.input @@ -10,8 +10,8 @@ global < rule_file: "prometheus.rules" > -job: < - name: "prometheus" +scrape_config: < + job_name: "prometheus" scrape_interval: "15s" metrics_path: "/metrics" scheme: "http" diff --git a/config/fixtures/mixing_sd_and_manual_targets.conf.input b/config/fixtures/mixing_sd_and_manual_targets.conf.input deleted file mode 100644 index 0d564234e..000000000 --- a/config/fixtures/mixing_sd_and_manual_targets.conf.input +++ /dev/null @@ -1,7 +0,0 @@ -job: < - name: "testjob" - sd_name: "sd_name" - target_group: < - target: "sampletarget:8080" - > -> diff --git a/config/fixtures/repeated_job_name.conf.input b/config/fixtures/repeated_job_name.conf.input index c59486219..3ca4fa468 100644 --- a/config/fixtures/repeated_job_name.conf.input +++ b/config/fixtures/repeated_job_name.conf.input @@ -1,11 +1,11 @@ -job: < - name: "testjob1" +scrape_config: < + job_name: "testjob1" > -job: < - name: "testjob2" +scrape_config: < + job_name: "testjob2" > -job: < - name: "testjob1" +scrape_config: < + job_name: "testjob1" > diff --git a/config/fixtures/sample.conf.input b/config/fixtures/sample.conf.input index 8ea3a069d..6bd42873f 100644 --- a/config/fixtures/sample.conf.input +++ b/config/fixtures/sample.conf.input @@ -10,8 +10,8 @@ global < rule_file: "prometheus.rules" > -job: < - name: "prometheus" +scrape_config: < + job_name: "prometheus" scrape_interval: "15s" target_group: < @@ -25,8 +25,8 @@ job: < > > -job: < - name: "random" +scrape_config: < + job_name: "random" scrape_interval: "30s" target_group: < diff --git a/config/fixtures/sd_targets.conf.input b/config/fixtures/sd_targets.conf.input index ffded895f..7b694db21 100644 --- a/config/fixtures/sd_targets.conf.input +++ b/config/fixtures/sd_targets.conf.input @@ -1,4 +1,6 @@ -job: < - name: "testjob" - sd_name: "sd_name" +scrape_config: < + job_name: "testjob" + dns_config: < + name: "sd_name" + > > diff --git a/config/generated/config.pb.go b/config/generated/config.pb.go index 089f9053b..c44247ab5 100644 --- a/config/generated/config.pb.go +++ b/config/generated/config.pb.go @@ -13,7 +13,8 @@ It has these top-level messages: LabelPairs GlobalConfig TargetGroup - JobConfig + DNSConfig + ScrapeConfig PrometheusConfig */ package io_prometheus @@ -146,12 +147,43 @@ func (m *TargetGroup) GetLabels() *LabelPairs { return nil } +// The 
configuration for DNS based service discovery. +type DNSConfig struct { + // The list of DNS-SD service names pointing to SRV records + // containing endpoint information. + Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` + // Discovery refresh period when using DNS-SD to discover targets. Must be a + // valid Prometheus duration string in the form "[0-9]+[smhdwy]". + RefreshInterval *string `protobuf:"bytes,2,opt,name=refresh_interval,def=30s" json:"refresh_interval,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DNSConfig) Reset() { *m = DNSConfig{} } +func (m *DNSConfig) String() string { return proto.CompactTextString(m) } +func (*DNSConfig) ProtoMessage() {} + +const Default_DNSConfig_RefreshInterval string = "30s" + +func (m *DNSConfig) GetName() []string { + if m != nil { + return m.Name + } + return nil +} + +func (m *DNSConfig) GetRefreshInterval() string { + if m != nil && m.RefreshInterval != nil { + return *m.RefreshInterval + } + return Default_DNSConfig_RefreshInterval +} + // The configuration for a Prometheus job to scrape. // -// The next field no. is 8. -type JobConfig struct { +// The next field no. is 10. +type ScrapeConfig struct { // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + JobName *string `protobuf:"bytes,1,req,name=job_name" json:"job_name,omitempty"` // How frequently to scrape targets from this job. Overrides the global // default. Must be a valid Prometheus duration string in the form // "[0-9]+[smhdwy]". @@ -159,15 +191,9 @@ type JobConfig struct { // Per-target timeout when scraping this job. Must be a valid Prometheus // duration string in the form "[0-9]+[smhdwy]". ScrapeTimeout *string `protobuf:"bytes,7,opt,name=scrape_timeout,def=10s" json:"scrape_timeout,omitempty"` - // The DNS-SD service name pointing to SRV records containing endpoint - // information for a job. When this field is provided, no target_group - // elements may be set. - SdName *string `protobuf:"bytes,3,opt,name=sd_name" json:"sd_name,omitempty"` - // Discovery refresh period when using DNS-SD to discover targets. Must be a - // valid Prometheus duration string in the form "[0-9]+[smhdwy]". - SdRefreshInterval *string `protobuf:"bytes,4,opt,name=sd_refresh_interval,def=30s" json:"sd_refresh_interval,omitempty"` - // List of labeled target groups for this job. Only legal when DNS-SD isn't - // used for a job. + // List of DNS service discovery configurations. + DnsConfig []*DNSConfig `protobuf:"bytes,9,rep,name=dns_config" json:"dns_config,omitempty"` + // List of labeled target groups for this job. TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"` // The HTTP resource path on which to fetch metrics from targets. 
MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"` @@ -176,69 +202,61 @@ type JobConfig struct { XXX_unrecognized []byte `json:"-"` } -func (m *JobConfig) Reset() { *m = JobConfig{} } -func (m *JobConfig) String() string { return proto.CompactTextString(m) } -func (*JobConfig) ProtoMessage() {} +func (m *ScrapeConfig) Reset() { *m = ScrapeConfig{} } +func (m *ScrapeConfig) String() string { return proto.CompactTextString(m) } +func (*ScrapeConfig) ProtoMessage() {} -const Default_JobConfig_ScrapeTimeout string = "10s" -const Default_JobConfig_SdRefreshInterval string = "30s" -const Default_JobConfig_MetricsPath string = "/metrics" -const Default_JobConfig_Scheme string = "http" +const Default_ScrapeConfig_ScrapeTimeout string = "10s" +const Default_ScrapeConfig_MetricsPath string = "/metrics" +const Default_ScrapeConfig_Scheme string = "http" -func (m *JobConfig) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (m *ScrapeConfig) GetJobName() string { + if m != nil && m.JobName != nil { + return *m.JobName } return "" } -func (m *JobConfig) GetScrapeInterval() string { +func (m *ScrapeConfig) GetScrapeInterval() string { if m != nil && m.ScrapeInterval != nil { return *m.ScrapeInterval } return "" } -func (m *JobConfig) GetScrapeTimeout() string { +func (m *ScrapeConfig) GetScrapeTimeout() string { if m != nil && m.ScrapeTimeout != nil { return *m.ScrapeTimeout } - return Default_JobConfig_ScrapeTimeout + return Default_ScrapeConfig_ScrapeTimeout } -func (m *JobConfig) GetSdName() string { - if m != nil && m.SdName != nil { - return *m.SdName +func (m *ScrapeConfig) GetDnsConfig() []*DNSConfig { + if m != nil { + return m.DnsConfig } - return "" + return nil } -func (m *JobConfig) GetSdRefreshInterval() string { - if m != nil && m.SdRefreshInterval != nil { - return *m.SdRefreshInterval - } - return Default_JobConfig_SdRefreshInterval -} - -func (m *JobConfig) GetTargetGroup() []*TargetGroup { +func (m *ScrapeConfig) GetTargetGroup() []*TargetGroup { if m != nil { return m.TargetGroup } return nil } -func (m *JobConfig) GetMetricsPath() string { +func (m *ScrapeConfig) GetMetricsPath() string { if m != nil && m.MetricsPath != nil { return *m.MetricsPath } - return Default_JobConfig_MetricsPath + return Default_ScrapeConfig_MetricsPath } -func (m *JobConfig) GetScheme() string { +func (m *ScrapeConfig) GetScheme() string { if m != nil && m.Scheme != nil { return *m.Scheme } - return Default_JobConfig_Scheme + return Default_ScrapeConfig_Scheme } // The top-level Prometheus configuration. @@ -247,9 +265,9 @@ type PrometheusConfig struct { // configuration with default values (see GlobalConfig definition) will be // created. Global *GlobalConfig `protobuf:"bytes,1,opt,name=global" json:"global,omitempty"` - // The list of jobs to scrape. - Job []*JobConfig `protobuf:"bytes,2,rep,name=job" json:"job,omitempty"` - XXX_unrecognized []byte `json:"-"` + // The list of scrape configs. 
+ ScrapeConfig []*ScrapeConfig `protobuf:"bytes,3,rep,name=scrape_config" json:"scrape_config,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *PrometheusConfig) Reset() { *m = PrometheusConfig{} } @@ -263,9 +281,9 @@ func (m *PrometheusConfig) GetGlobal() *GlobalConfig { return nil } -func (m *PrometheusConfig) GetJob() []*JobConfig { +func (m *PrometheusConfig) GetScrapeConfig() []*ScrapeConfig { if m != nil { - return m.Job + return m.ScrapeConfig } return nil } diff --git a/config/load.go b/config/load.go index 195226aee..75b6b03f8 100644 --- a/config/load.go +++ b/config/load.go @@ -30,9 +30,9 @@ func LoadFromString(configStr string) (Config, error) { if configProto.Global == nil { configProto.Global = &pb.GlobalConfig{} } - for _, job := range configProto.Job { - if job.ScrapeInterval == nil { - job.ScrapeInterval = proto.String(configProto.Global.GetScrapeInterval()) + for _, scfg := range configProto.GetScrapeConfig() { + if scfg.ScrapeInterval == nil { + scfg.ScrapeInterval = proto.String(configProto.Global.GetScrapeInterval()) } } diff --git a/retrieval/discovery/dns.go b/retrieval/discovery/dns.go index 6b71dc0d1..70cf3a738 100644 --- a/retrieval/discovery/dns.go +++ b/retrieval/discovery/dns.go @@ -61,7 +61,7 @@ func init() { // DNSDiscovery periodically performs DNS-SD requests. It implements // the TargetProvider interface. type DNSDiscovery struct { - name string + names []string done chan struct{} ticker *time.Ticker @@ -69,9 +69,9 @@ type DNSDiscovery struct { } // NewDNSDiscovery returns a new DNSDiscovery which periodically refreshes its targets. -func NewDNSDiscovery(name string, refreshInterval time.Duration) *DNSDiscovery { +func NewDNSDiscovery(names []string, refreshInterval time.Duration) *DNSDiscovery { return &DNSDiscovery{ - name: name, + names: names, done: make(chan struct{}), ticker: time.NewTicker(refreshInterval), } @@ -82,16 +82,12 @@ func (dd *DNSDiscovery) Run(ch chan<- *config.TargetGroup) { defer close(ch) // Get an initial set right away. - if err := dd.refresh(ch); err != nil { - glog.Errorf("Error refreshing DNS targets: %s", err) - } + dd.refreshAll(ch) for { select { case <-dd.ticker.C: - if err := dd.refresh(ch); err != nil { - glog.Errorf("Error refreshing DNS targets: %s", err) - } + dd.refreshAll(ch) case <-dd.done: return } @@ -100,21 +96,39 @@ func (dd *DNSDiscovery) Run(ch chan<- *config.TargetGroup) { // Stop implements the TargetProvider interface. func (dd *DNSDiscovery) Stop() { - glog.V(1).Info("Stopping DNS discovery for %s...", dd.name) + glog.V(1).Info("Stopping DNS discovery for %s...", dd.names) dd.ticker.Stop() dd.done <- struct{}{} - glog.V(1).Info("DNS discovery for %s stopped.", dd.name) + glog.V(1).Info("DNS discovery for %s stopped.", dd.names) } // Sources implements the TargetProvider interface. 
func (dd *DNSDiscovery) Sources() []string { - return []string{dnsSourcePrefix + ":" + dd.name} + var srcs []string + for _, name := range dd.names { + srcs = append(srcs, dnsSourcePrefix+":"+name) + } + return srcs } -func (dd *DNSDiscovery) refresh(ch chan<- *config.TargetGroup) error { - response, err := lookupSRV(dd.name) +func (dd *DNSDiscovery) refreshAll(ch chan<- *config.TargetGroup) { + var wg sync.WaitGroup + wg.Add(len(dd.names)) + for _, name := range dd.names { + go func(n string) { + if err := dd.refresh(n, ch); err != nil { + glog.Errorf("Error refreshing DNS targets: %s", err) + } + wg.Done() + }(name) + } + wg.Wait() +} + +func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) error { + response, err := lookupSRV(name) dnsSDLookupsCount.Inc() if err != nil { dnsSDLookupFailuresCount.Inc() @@ -137,7 +151,7 @@ func (dd *DNSDiscovery) refresh(ch chan<- *config.TargetGroup) error { }) } - tg.Source = dnsSourcePrefix + ":" + dd.name + tg.Source = dnsSourcePrefix + ":" + name ch <- tg return nil diff --git a/retrieval/target.go b/retrieval/target.go index a87a5ddb4..d09dbeecb 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -125,6 +125,9 @@ type Target interface { // The URL as seen from other hosts. References to localhost are resolved // to the address of the prometheus server. GlobalURL() string + // Return the labels describing the targets. These are the base labels + // as well as internal labels. + Labels() clientmodel.LabelSet // Return the target's base labels. BaseLabels() clientmodel.LabelSet // Return the target's base labels without job and instance label. That's @@ -135,7 +138,7 @@ type Target interface { // Stop scraping, synchronous. StopScraper() // Update the target's state. - Update(config.JobConfig, clientmodel.LabelSet) + Update(*config.ScrapeConfig, clientmodel.LabelSet) } // target is a Target that refers to a singular HTTP or HTTPS endpoint. @@ -169,7 +172,7 @@ type target struct { } // NewTarget creates a reasonably configured target for querying. -func NewTarget(address string, cfg config.JobConfig, baseLabels clientmodel.LabelSet) Target { +func NewTarget(address string, cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target { t := &target{ url: &url.URL{ Host: address, @@ -183,12 +186,12 @@ func NewTarget(address string, cfg config.JobConfig, baseLabels clientmodel.Labe // Update overwrites settings in the target that are derived from the job config // it belongs to. -func (t *target) Update(cfg config.JobConfig, baseLabels clientmodel.LabelSet) { +func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) { t.Lock() defer t.Unlock() t.url.Scheme = cfg.GetScheme() - t.url.Path = cfg.GetMetricsPath() + t.url.Path = string(baseLabels[clientmodel.MetricsPathLabel]) t.scrapeInterval = cfg.ScrapeInterval() t.deadline = cfg.ScrapeTimeout() @@ -197,8 +200,12 @@ func (t *target) Update(cfg config.JobConfig, baseLabels clientmodel.LabelSet) { t.baseLabels = clientmodel.LabelSet{ clientmodel.InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier()), } + + // All remaining internal labels will not be part of the label set. for name, val := range baseLabels { - t.baseLabels[name] = val + if !strings.HasPrefix(string(name), clientmodel.ReservedLabelPrefix) { + t.baseLabels[name] = val + } } } @@ -425,6 +432,19 @@ func (t *target) GlobalURL() string { return url } +// Labels implements Target. 
+func (t *target) Labels() clientmodel.LabelSet { + t.RLock() + defer t.RUnlock() + ls := clientmodel.LabelSet{} + for ln, lv := range t.baseLabels { + ls[ln] = lv + } + ls[clientmodel.MetricsPathLabel] = clientmodel.LabelValue(t.url.Path) + ls[clientmodel.AddressLabel] = clientmodel.LabelValue(t.url.Host) + return ls +} + // BaseLabels implements Target. func (t *target) BaseLabels() clientmodel.LabelSet { t.RLock() @@ -433,9 +453,14 @@ func (t *target) BaseLabels() clientmodel.LabelSet { } // BaseLabelsWithoutJobAndInstance implements Target. +// +// TODO(fabxc): This method does not have to be part of the interface. Implement this +// as a template filter func for the single use case. func (t *target) BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet { + t.RLock() + defer t.RUnlock() ls := clientmodel.LabelSet{} - for ln, lv := range t.BaseLabels() { + for ln, lv := range t.baseLabels { if ln != clientmodel.JobLabel && ln != clientmodel.InstanceLabel { ls[ln] = lv } diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 6ab80050b..20b0fcee3 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -91,8 +91,8 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { } func TestTargetRecordScrapeHealth(t *testing.T) { - jcfg := config.JobConfig{} - proto.SetDefaults(&jcfg.JobConfig) + scfg := &config.ScrapeConfig{} + proto.SetDefaults(&scfg.ScrapeConfig) testTarget := newTestTarget("example.url", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}) @@ -150,8 +150,8 @@ func TestTargetScrapeTimeout(t *testing.T) { ) defer server.Close() - jcfg := config.JobConfig{} - proto.SetDefaults(&jcfg.JobConfig) + scfg := &config.ScrapeConfig{} + proto.SetDefaults(&scfg.ScrapeConfig) var testTarget Target = newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 2a3e3b325..2427dc2cf 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -30,7 +30,7 @@ import ( // A TargetProvider provides information about target groups. It maintains a set // of sources from which TargetGroups can originate. Whenever a target provider -// detects a potential change it sends the TargetGroup through its provided channel. +// detects a potential change, it sends the TargetGroup through its provided channel. // // The TargetProvider does not have to guarantee that an actual change happened. // It does guarantee that it sends the new TargetGroup whenever a change happens. @@ -58,10 +58,8 @@ type TargetManager struct { // Targets by their source ID. targets map[string][]Target - // Providers and configs by their job name. - // TODO(fabxc): turn this into map[*ScrapeConfig][]TargetProvider eventually. - providers map[string][]TargetProvider - configs map[string]config.JobConfig + // Providers by the scrape configs they are derived from. + providers map[*config.ScrapeConfig][]TargetProvider } // NewTargetManager creates a new TargetManager based on the given config. 
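
For orientation, the TargetProvider contract documented above is deliberately small: Sources, Run, and Stop. Below is a minimal sketch of a conforming provider; the name and the one-shot behavior are illustrative only and not part of this patch (the codebase's own NewStaticProvider plays this role for target groups taken from the config):

package retrieval

import "github.com/prometheus/prometheus/config"

// staticProvider is a hypothetical one-shot provider: it sends its fixed
// target groups once and then idles until it is stopped.
type staticProvider struct {
	groups []*config.TargetGroup
	done   chan struct{}
}

// Sources returns one source ID per group. The TargetManager namespaces
// these IDs per scrape config (see fullSource below), so equal IDs from
// different configs cannot clobber each other.
func (sp *staticProvider) Sources() []string {
	srcs := make([]string, 0, len(sp.groups))
	for _, tg := range sp.groups {
		srcs = append(srcs, tg.Source)
	}
	return srcs
}

// Run sends all known groups once and then blocks until Stop is called.
// Re-sending a group that did not actually change would be fine: the
// contract only requires that every real change is sent.
func (sp *staticProvider) Run(ch chan<- *config.TargetGroup) {
	defer close(ch)
	for _, tg := range sp.groups {
		select {
		case ch <- tg:
		case <-sp.done:
			return
		}
	}
	<-sp.done
}

// Stop terminates Run.
func (sp *staticProvider) Stop() {
	close(sp.done)
}

Since consumers must tolerate updates that are not actual changes, a provider may err on the side of sending too often; deduplication happens downstream per source ID.
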
@@ -82,15 +80,13 @@ func (tm *TargetManager) Run() { sources := map[string]struct{}{} - for name, provs := range tm.providers { + for scfg, provs := range tm.providers { for _, p := range provs { - jcfg := tm.configs[name] - ch := make(chan *config.TargetGroup) - go tm.handleTargetUpdates(tm.configs[name], ch) + go tm.handleTargetUpdates(scfg, ch) for _, src := range p.Sources() { - src = fullSource(jcfg, src) + src = fullSource(scfg, src) sources[src] = struct{}{} } @@ -113,7 +109,7 @@ func (tm *TargetManager) Run() { // handleTargetUpdates receives target group updates and handles them in the // context of the given job config. -func (tm *TargetManager) handleTargetUpdates(cfg config.JobConfig, ch <-chan *config.TargetGroup) { +func (tm *TargetManager) handleTargetUpdates(cfg *config.ScrapeConfig, ch <-chan *config.TargetGroup) { for tg := range ch { glog.V(1).Infof("Received potential update for target group %q", tg.Source) @@ -127,8 +123,8 @@ func (tm *TargetManager) handleTargetUpdates(cfg config.JobConfig, ch <-chan *co // // Thus, oscilliating label sets for targets with the same source, // but providers from different configs, are prevented. -func fullSource(cfg config.JobConfig, src string) string { - return cfg.GetName() + ":" + src +func fullSource(cfg *config.ScrapeConfig, src string) string { + return cfg.GetJobName() + ":" + src } // Stop all background processing. @@ -187,7 +183,7 @@ func (tm *TargetManager) removeTargets(f func(string) bool) { // updateTargetGroup creates new targets for the group and replaces the old targets // for the source ID. -func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg config.JobConfig) error { +func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *config.ScrapeConfig) error { newTargets, err := tm.targetsFromGroup(tgroup, cfg) if err != nil { return err @@ -197,6 +193,10 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg confi tm.m.Lock() defer tm.m.Unlock() + if !tm.running { + return nil + } + oldTargets, ok := tm.targets[src] if ok { var wg sync.WaitGroup @@ -221,7 +221,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg confi // to build up. wg.Add(1) go func(t Target) { - match.Update(cfg, t.BaseLabels()) + match.Update(cfg, t.Labels()) wg.Done() }(tnew) newTargets[i] = match @@ -287,71 +287,72 @@ func (tm *TargetManager) ApplyConfig(cfg config.Config) error { func (tm *TargetManager) applyConfig(cfg config.Config) error { // Only apply changes if everything was successful. - providers := map[string][]TargetProvider{} - configs := map[string]config.JobConfig{} + providers := map[*config.ScrapeConfig][]TargetProvider{} - for _, jcfg := range cfg.Jobs() { - provs, err := ProvidersFromConfig(jcfg) + for _, scfg := range cfg.ScrapeConfigs() { + provs, err := ProvidersFromConfig(scfg) if err != nil { return err } - configs[jcfg.GetName()] = jcfg - providers[jcfg.GetName()] = provs + providers[scfg] = provs } tm.m.Lock() defer tm.m.Unlock() tm.globalLabels = cfg.GlobalLabels() tm.providers = providers - tm.configs = configs return nil } // targetsFromGroup builds targets based on the given TargetGroup and config. 
-func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg config.JobConfig) ([]Target, error) { +func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) ([]Target, error) { tm.m.RLock() defer tm.m.RUnlock() targets := make([]Target, 0, len(tg.Targets)) for i, labels := range tg.Targets { - for ln, lv := range tg.Labels { - if _, ok := labels[ln]; !ok { - labels[ln] = lv - } - } - for ln, lv := range tm.globalLabels { - if _, ok := labels[ln]; !ok { - labels[ln] = lv + // Copy labels into the labelset for the target if they are not + // set already. Apply the labelsets in order of decreasing precedence. + labelsets := []clientmodel.LabelSet{ + tg.Labels, + cfg.Labels(), + tm.globalLabels, + } + for _, lset := range labelsets { + for ln, lv := range lset { + if _, ok := labels[ln]; !ok { + labels[ln] = lv + } } } + address, ok := labels[clientmodel.AddressLabel] if !ok { return nil, fmt.Errorf("Instance %d in target group %s has no address", i, tg) } - if _, ok := labels[clientmodel.JobLabel]; !ok { - labels[clientmodel.JobLabel] = clientmodel.LabelValue(cfg.GetName()) - } for ln := range labels { - // There are currently no internal labels we want to take over to time series. - if strings.HasPrefix(string(ln), clientmodel.ReservedLabelPrefix) { + // Meta labels are deleted after relabelling. Other internal labels propagate to + // the target which decides whether they will be part of their label set. + if strings.HasPrefix(string(ln), clientmodel.MetaLabelPrefix) { delete(labels, ln) } } targets = append(targets, NewTarget(string(address), cfg, labels)) + } + return targets, nil } // ProvidersFromConfig returns all TargetProviders configured in cfg. -func ProvidersFromConfig(cfg config.JobConfig) ([]TargetProvider, error) { +func ProvidersFromConfig(cfg *config.ScrapeConfig) ([]TargetProvider, error) { var providers []TargetProvider - if name := cfg.GetSdName(); name != "" { - dnsSD := discovery.NewDNSDiscovery(name, cfg.SDRefreshInterval()) + for _, dnscfg := range cfg.DNSConfigs() { + dnsSD := discovery.NewDNSDiscovery(dnscfg.GetName(), dnscfg.RefreshInterval()) providers = append(providers, dnsSD) } - if tgs := cfg.GetTargetGroup(); tgs != nil { static := NewStaticProvider(tgs) providers = append(providers, static) diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index ed1b1cd00..1d8315ca2 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -27,12 +27,12 @@ import ( ) func TestTargetManagerChan(t *testing.T) { - testJob1 := pb.JobConfig{ - Name: proto.String("test_job1"), + testJob1 := &config.ScrapeConfig{pb.ScrapeConfig{ + JobName: proto.String("test_job1"), ScrapeInterval: proto.String("1m"), TargetGroup: []*pb.TargetGroup{ {Target: []string{"example.org:80", "example.com:80"}}, - }, + }}, } prov1 := &fakeTargetProvider{ sources: []string{"src1", "src2"}, @@ -41,11 +41,8 @@ func TestTargetManagerChan(t *testing.T) { targetManager := &TargetManager{ sampleAppender: nopAppender{}, - providers: map[string][]TargetProvider{ - *testJob1.Name: []TargetProvider{prov1}, - }, - configs: map[string]config.JobConfig{ - *testJob1.Name: config.JobConfig{testJob1}, + providers: map[*config.ScrapeConfig][]TargetProvider{ + testJob1: []TargetProvider{prov1}, }, targets: make(map[string][]Target), } @@ -156,15 +153,15 @@ func TestTargetManagerChan(t *testing.T) { } func TestTargetManagerConfigUpdate(t *testing.T) { - testJob1 := &pb.JobConfig{ - Name: proto.String("test_job1"), + testJob1 
:= &pb.ScrapeConfig{
+		JobName:        proto.String("test_job1"),
 		ScrapeInterval: proto.String("1m"),
 		TargetGroup: []*pb.TargetGroup{
 			{Target: []string{"example.org:80", "example.com:80"}},
 		},
 	}
-	testJob2 := &pb.JobConfig{
-		Name:           proto.String("test_job2"),
+	testJob2 := &pb.ScrapeConfig{
+		JobName:        proto.String("test_job2"),
 		ScrapeInterval: proto.String("1m"),
 		TargetGroup: []*pb.TargetGroup{
 			{Target: []string{"example.org:8080", "example.com:8081"}},
@@ -173,11 +170,11 @@ func TestTargetManagerConfigUpdate(t *testing.T) {
 	}
 
 	sequence := []struct {
-		jobConfigs []*pb.JobConfig
-		expected   map[string][]clientmodel.LabelSet
+		scrapeConfigs []*pb.ScrapeConfig
+		expected      map[string][]clientmodel.LabelSet
 	}{
 		{
-			jobConfigs: []*pb.JobConfig{testJob1},
+			scrapeConfigs: []*pb.ScrapeConfig{testJob1},
 			expected: map[string][]clientmodel.LabelSet{
 				"test_job1:static:0": {
 					{clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"},
@@ -185,7 +182,7 @@
 				},
 			},
 		}, {
-			jobConfigs: []*pb.JobConfig{testJob1},
+			scrapeConfigs: []*pb.ScrapeConfig{testJob1},
 			expected: map[string][]clientmodel.LabelSet{
 				"test_job1:static:0": {
 					{clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"},
@@ -193,7 +190,7 @@
 				},
 			},
 		}, {
-			jobConfigs: []*pb.JobConfig{testJob1, testJob2},
+			scrapeConfigs: []*pb.ScrapeConfig{testJob1, testJob2},
 			expected: map[string][]clientmodel.LabelSet{
 				"test_job1:static:0": {
 					{clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"},
@@ -208,10 +205,10 @@
 				},
 			},
 		}, {
-			jobConfigs: []*pb.JobConfig{},
-			expected:   map[string][]clientmodel.LabelSet{},
+			scrapeConfigs: []*pb.ScrapeConfig{},
+			expected:      map[string][]clientmodel.LabelSet{},
 		}, {
-			jobConfigs: []*pb.JobConfig{testJob2},
+			scrapeConfigs: []*pb.ScrapeConfig{testJob2},
 			expected: map[string][]clientmodel.LabelSet{
 				"test_job2:static:0": {
 					{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080"},
 					{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081"},
 				},
 				"test_job2:static:1": {
 					{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "test.com:1234"},
@@ -233,7 +230,7 @@
 
 	for i, step := range sequence {
 		cfg := pb.PrometheusConfig{
-			Job: step.jobConfigs,
+			ScrapeConfig: step.scrapeConfigs,
 		}
 		err := targetManager.ApplyConfig(config.Config{cfg})
 		if err != nil {

From 945c49a2dd869dd332e96f3b4b3a9ae20d9154ff Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Wed, 29 Apr 2015 00:08:58 +0200
Subject: [PATCH 04/27] Add relabelling to target management.

This commit adds a relabelling stage on the set of base labels from
which a target is created. It allows dropping targets and rewriting
any regular or internal label.
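
Concretely, a single REPLACE rule applied through the new Relabel helper
(introduced below) behaves as in this usage sketch; the label names and
values are made up for illustration, and the code is written as if it
lived inside the retrieval package:

package retrieval

import (
	"github.com/golang/protobuf/proto"

	clientmodel "github.com/prometheus/client_golang/model"

	"github.com/prometheus/prometheus/config"
	pb "github.com/prometheus/prometheus/config/generated"
)

func relabelExample() (clientmodel.LabelSet, error) {
	rc := pb.RelabelConfig{
		// "api" and "eu-west" are joined to "api;eu-west" before matching.
		SourceLabel: []string{"job", "zone"},
		Separator:   proto.String(";"),
		Regex:       proto.String("^api;eu-(.*)$"),
		// On a match, the expanded replacement is written to "region".
		TargetLabel: proto.String("region"),
		Replacement: proto.String("europe-${1}"),
		Action:      pb.RelabelConfig_REPLACE.Enum(),
	}
	labels := clientmodel.LabelSet{"job": "api", "zone": "eu-west"}

	// Yields {"job": "api", "zone": "eu-west", "region": "europe-west"}.
	// A KEEP or DROP rule would instead return nil to discard the target.
	return Relabel(labels, &config.RelabelConfig{rc})
}
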
--- config/config.go | 26 +++++ config/config.proto | 27 ++++- config/generated/config.pb.go | 118 +++++++++++++++++++++- retrieval/discovery/dns.go | 2 + retrieval/relabel.go | 70 +++++++++++++ retrieval/relabel_test.go | 172 ++++++++++++++++++++++++++++++++ retrieval/target.go | 12 +-- retrieval/targetmanager.go | 26 +++-- retrieval/targetmanager_test.go | 55 ++++++++-- 9 files changed, 483 insertions(+), 25 deletions(-) create mode 100644 retrieval/relabel.go create mode 100644 retrieval/relabel_test.go diff --git a/config/config.go b/config/config.go index 844bd79e2..94f45fb0f 100644 --- a/config/config.go +++ b/config/config.go @@ -14,6 +14,7 @@ package config import ( + "errors" "fmt" "regexp" "strings" @@ -190,6 +191,11 @@ func (c *ScrapeConfig) Validate() error { return fmt.Errorf("invalid DNS config: %s", err) } } + for _, rlcfg := range c.RelabelConfigs() { + if err := rlcfg.Validate(); err != nil { + return fmt.Errorf("invalid relabelling config: %s", err) + } + } return nil } @@ -203,6 +209,15 @@ func (c *ScrapeConfig) DNSConfigs() []*DNSConfig { return dnscfgs } +// RelabelConfigs returns the relabel configs of the scrape config. +func (c *ScrapeConfig) RelabelConfigs() []*RelabelConfig { + var rlcfgs []*RelabelConfig + for _, rc := range c.GetRelabelConfig() { + rlcfgs = append(rlcfgs, &RelabelConfig{*rc}) + } + return rlcfgs +} + // DNSConfig encapsulates the protobuf configuration object for DNS based // service discovery. type DNSConfig struct { @@ -222,6 +237,17 @@ func (c *DNSConfig) RefreshInterval() time.Duration { return stringToDuration(c.GetRefreshInterval()) } +type RelabelConfig struct { + pb.RelabelConfig +} + +func (c *RelabelConfig) Validate() error { + if len(c.GetSourceLabel()) == 0 { + return errors.New("at least one source label is required") + } + return nil +} + // TargetGroup is derived from a protobuf TargetGroup and attaches a source to it // that identifies the origin of the group. type TargetGroup struct { diff --git a/config/config.proto b/config/config.proto index 3de910005..c0d534449 100644 --- a/config/config.proto +++ b/config/config.proto @@ -58,9 +58,32 @@ message DNSConfig { optional string refresh_interval = 2 [default = "30s"]; } +// The configuration for relabeling of target label sets. +message RelabelConfig { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + repeated string source_label = 1; + // Regex against which the concatenation is matched. + required string regex = 2; + // The label to which the resulting string is written in a replacement. + optional string target_label = 3; + // Replacement is the regex replacement pattern to be used. + optional string replacement = 4; + // Separator is the string between concatenated values from the source labels. + optional string separator = 5 [default = ";"]; + + // Action is the action to be performed for the relabeling. + enum Action { + REPLACE = 0; // Performs a regex replacement. + KEEP = 1; // Drops targets for which the input does not match the regex. + DROP = 2; // Drops targets for which the input does match the regex. + } + optional Action action = 6 [default = REPLACE]; +} + // The configuration for a Prometheus job to scrape. // -// The next field no. is 10. +// The next field no. is 11. message ScrapeConfig { // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". 
required string job_name = 1; @@ -75,6 +98,8 @@ message ScrapeConfig { repeated DNSConfig dns_config = 9; // List of labeled target groups for this job. repeated TargetGroup target_group = 5; + // List of relabel configurations. + repeated RelabelConfig relabel_config = 10; // The HTTP resource path on which to fetch metrics from targets. optional string metrics_path = 6 [default = "/metrics"]; // The URL scheme with which to fetch metrics from targets. diff --git a/config/generated/config.pb.go b/config/generated/config.pb.go index c44247ab5..6898a9af8 100644 --- a/config/generated/config.pb.go +++ b/config/generated/config.pb.go @@ -14,6 +14,7 @@ It has these top-level messages: GlobalConfig TargetGroup DNSConfig + RelabelConfig ScrapeConfig PrometheusConfig */ @@ -26,6 +27,43 @@ import math "math" var _ = proto.Marshal var _ = math.Inf +// Action is the action to be performed for the relabeling. +type RelabelConfig_Action int32 + +const ( + RelabelConfig_REPLACE RelabelConfig_Action = 0 + RelabelConfig_KEEP RelabelConfig_Action = 1 + RelabelConfig_DROP RelabelConfig_Action = 2 +) + +var RelabelConfig_Action_name = map[int32]string{ + 0: "REPLACE", + 1: "KEEP", + 2: "DROP", +} +var RelabelConfig_Action_value = map[string]int32{ + "REPLACE": 0, + "KEEP": 1, + "DROP": 2, +} + +func (x RelabelConfig_Action) Enum() *RelabelConfig_Action { + p := new(RelabelConfig_Action) + *p = x + return p +} +func (x RelabelConfig_Action) String() string { + return proto.EnumName(RelabelConfig_Action_name, int32(x)) +} +func (x *RelabelConfig_Action) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RelabelConfig_Action_value, data, "RelabelConfig_Action") + if err != nil { + return err + } + *x = RelabelConfig_Action(value) + return nil +} + // A label/value pair suitable for attaching to timeseries. type LabelPair struct { // The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*". @@ -149,7 +187,7 @@ func (m *TargetGroup) GetLabels() *LabelPairs { // The configuration for DNS based service discovery. type DNSConfig struct { - // The list of DNS-SD service names pointing to SRV records + // The list of DNS-SD service names pointing to SRV records // containing endpoint information. Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` // Discovery refresh period when using DNS-SD to discover targets. Must be a @@ -178,9 +216,75 @@ func (m *DNSConfig) GetRefreshInterval() string { return Default_DNSConfig_RefreshInterval } +// The configuration for relabeling of target label sets. +type RelabelConfig struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabel []string `protobuf:"bytes,1,rep,name=source_label" json:"source_label,omitempty"` + // Regex against which the concatenation is matched. + Regex *string `protobuf:"bytes,2,req,name=regex" json:"regex,omitempty"` + // The label to which the resulting string is written in a replacement. + TargetLabel *string `protobuf:"bytes,3,opt,name=target_label" json:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement *string `protobuf:"bytes,4,opt,name=replacement" json:"replacement,omitempty"` + // Separator is the string between concatenated values from the source labels. 
+ Separator *string `protobuf:"bytes,5,opt,name=separator,def=;" json:"separator,omitempty"` + Action *RelabelConfig_Action `protobuf:"varint,6,opt,name=action,enum=io.prometheus.RelabelConfig_Action,def=0" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RelabelConfig) Reset() { *m = RelabelConfig{} } +func (m *RelabelConfig) String() string { return proto.CompactTextString(m) } +func (*RelabelConfig) ProtoMessage() {} + +const Default_RelabelConfig_Separator string = ";" +const Default_RelabelConfig_Action RelabelConfig_Action = RelabelConfig_REPLACE + +func (m *RelabelConfig) GetSourceLabel() []string { + if m != nil { + return m.SourceLabel + } + return nil +} + +func (m *RelabelConfig) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *RelabelConfig) GetTargetLabel() string { + if m != nil && m.TargetLabel != nil { + return *m.TargetLabel + } + return "" +} + +func (m *RelabelConfig) GetReplacement() string { + if m != nil && m.Replacement != nil { + return *m.Replacement + } + return "" +} + +func (m *RelabelConfig) GetSeparator() string { + if m != nil && m.Separator != nil { + return *m.Separator + } + return Default_RelabelConfig_Separator +} + +func (m *RelabelConfig) GetAction() RelabelConfig_Action { + if m != nil && m.Action != nil { + return *m.Action + } + return Default_RelabelConfig_Action +} + // The configuration for a Prometheus job to scrape. // -// The next field no. is 10. +// The next field no. is 11. type ScrapeConfig struct { // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". JobName *string `protobuf:"bytes,1,req,name=job_name" json:"job_name,omitempty"` @@ -195,6 +299,8 @@ type ScrapeConfig struct { DnsConfig []*DNSConfig `protobuf:"bytes,9,rep,name=dns_config" json:"dns_config,omitempty"` // List of labeled target groups for this job. TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"` + // List of relabel configurations. + RelabelConfig []*RelabelConfig `protobuf:"bytes,10,rep,name=relabel_config" json:"relabel_config,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -245,6 +351,13 @@ func (m *ScrapeConfig) GetTargetGroup() []*TargetGroup { return nil } +func (m *ScrapeConfig) GetRelabelConfig() []*RelabelConfig { + if m != nil { + return m.RelabelConfig + } + return nil +} + func (m *ScrapeConfig) GetMetricsPath() string { if m != nil && m.MetricsPath != nil { return *m.MetricsPath @@ -289,4 +402,5 @@ func (m *PrometheusConfig) GetScrapeConfig() []*ScrapeConfig { } func init() { + proto.RegisterEnum("io.prometheus.RelabelConfig_Action", RelabelConfig_Action_name, RelabelConfig_Action_value) } diff --git a/retrieval/discovery/dns.go b/retrieval/discovery/dns.go index 70cf3a738..017ea476a 100644 --- a/retrieval/discovery/dns.go +++ b/retrieval/discovery/dns.go @@ -32,6 +32,7 @@ const ( resolvConf = "/etc/resolv.conf" dnsSourcePrefix = "dns" + DNSNameLabel = clientmodel.MetaLabelPrefix + "dns_srv_name" // Constants for instrumentation. 
namespace = "prometheus" @@ -148,6 +149,7 @@ func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) erro target := clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port)) tg.Targets = append(tg.Targets, clientmodel.LabelSet{ clientmodel.AddressLabel: target, + DNSNameLabel: clientmodel.LabelValue(name), }) } diff --git a/retrieval/relabel.go b/retrieval/relabel.go new file mode 100644 index 000000000..e0ddb232d --- /dev/null +++ b/retrieval/relabel.go @@ -0,0 +1,70 @@ +package retrieval + +import ( + "regexp" + "strings" + + clientmodel "github.com/prometheus/client_golang/model" + + "github.com/prometheus/prometheus/config" + pb "github.com/prometheus/prometheus/config/generated" +) + +// Relabel returns a relabeled copy of the given label set. The relabel configurations +// are applied in order of input. +// If a label set is dropped, nil is returned. +func Relabel(labels clientmodel.LabelSet, cfgs ...*config.RelabelConfig) (clientmodel.LabelSet, error) { + out := clientmodel.LabelSet{} + for ln, lv := range labels { + out[ln] = lv + } + var err error + for _, cfg := range cfgs { + if out, err = relabel(out, cfg); err != nil { + return nil, err + } + if out == nil { + return nil, nil + } + } + return out, nil +} + +func relabel(labels clientmodel.LabelSet, cfg *config.RelabelConfig) (clientmodel.LabelSet, error) { + pat, err := regexp.Compile(cfg.GetRegex()) + if err != nil { + return nil, err + } + + values := make([]string, 0, len(cfg.GetSourceLabel())) + for _, name := range cfg.GetSourceLabel() { + values = append(values, string(labels[clientmodel.LabelName(name)])) + } + val := strings.Join(values, cfg.GetSeparator()) + + switch cfg.GetAction() { + case pb.RelabelConfig_DROP: + if pat.MatchString(val) { + return nil, nil + } + case pb.RelabelConfig_KEEP: + if !pat.MatchString(val) { + return nil, nil + } + case pb.RelabelConfig_REPLACE: + // If there is no match no replacement must take place. 
+ if !pat.MatchString(val) { + break + } + res := pat.ReplaceAllString(val, cfg.GetReplacement()) + ln := clientmodel.LabelName(cfg.GetTargetLabel()) + if res == "" { + delete(labels, ln) + } else { + labels[ln] = clientmodel.LabelValue(res) + } + default: + panic("retrieval.relabel: unknown relabel action type") + } + return labels, nil +} diff --git a/retrieval/relabel_test.go b/retrieval/relabel_test.go new file mode 100644 index 000000000..0bdfe4315 --- /dev/null +++ b/retrieval/relabel_test.go @@ -0,0 +1,172 @@ +package retrieval + +import ( + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + + clientmodel "github.com/prometheus/client_golang/model" + + "github.com/prometheus/prometheus/config" + pb "github.com/prometheus/prometheus/config/generated" +) + +func TestRelabel(t *testing.T) { + tests := []struct { + input clientmodel.LabelSet + relabel []pb.RelabelConfig + output clientmodel.LabelSet + }{ + { + input: clientmodel.LabelSet{ + "a": "foo", + "b": "bar", + "c": "baz", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("f(.*)"), + TargetLabel: proto.String("d"), + Separator: proto.String(";"), + Replacement: proto.String("ch${1}-ch${1}"), + Action: pb.RelabelConfig_REPLACE.Enum(), + }, + }, + output: clientmodel.LabelSet{ + "a": "foo", + "b": "bar", + "c": "baz", + "d": "choo-choo", + }, + }, + { + input: clientmodel.LabelSet{ + "a": "foo", + "b": "bar", + "c": "baz", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a", "b"}, + Regex: proto.String("^f(.*);(.*)r$"), + TargetLabel: proto.String("a"), + Separator: proto.String(";"), + Replacement: proto.String("b${1}${2}m"), // boobam + }, + { + SourceLabel: []string{"c", "a"}, + Regex: proto.String("(b).*b(.*)ba(.*)"), + TargetLabel: proto.String("d"), + Separator: proto.String(";"), + Replacement: proto.String("$1$2$2$3"), + Action: pb.RelabelConfig_REPLACE.Enum(), + }, + }, + output: clientmodel.LabelSet{ + "a": "boobam", + "b": "bar", + "c": "baz", + "d": "boooom", + }, + }, + { + input: clientmodel.LabelSet{ + "a": "foo", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("o$"), + Action: pb.RelabelConfig_DROP.Enum(), + }, { + SourceLabel: []string{"a"}, + Regex: proto.String("f(.*)"), + TargetLabel: proto.String("d"), + Separator: proto.String(";"), + Replacement: proto.String("ch$1-ch$1"), + Action: pb.RelabelConfig_REPLACE.Enum(), + }, + }, + output: nil, + }, + { + input: clientmodel.LabelSet{ + "a": "foo", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("no-match"), + Action: pb.RelabelConfig_DROP.Enum(), + }, + }, + output: clientmodel.LabelSet{ + "a": "foo", + }, + }, + { + input: clientmodel.LabelSet{ + "a": "foo", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("no-match"), + Action: pb.RelabelConfig_KEEP.Enum(), + }, + }, + output: nil, + }, + { + input: clientmodel.LabelSet{ + "a": "foo", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("^f"), + Action: pb.RelabelConfig_KEEP.Enum(), + }, + }, + output: clientmodel.LabelSet{ + "a": "foo", + }, + }, + { + // No replacement must be applied if there is no match. 
+ input: clientmodel.LabelSet{ + "a": "boo", + }, + relabel: []pb.RelabelConfig{ + { + SourceLabel: []string{"a"}, + Regex: proto.String("^f"), + Action: pb.RelabelConfig_REPLACE.Enum(), + TargetLabel: proto.String("b"), + Replacement: proto.String("bar"), + }, + }, + output: clientmodel.LabelSet{ + "a": "boo", + }, + }, + } + + for i, test := range tests { + var relabel []*config.RelabelConfig + for _, rl := range test.relabel { + proto.SetDefaults(&rl) + relabel = append(relabel, &config.RelabelConfig{rl}) + } + res, err := Relabel(test.input, relabel...) + if err != nil { + t.Errorf("Test %d: error relabeling: %s", i+1, err) + } + + if !reflect.DeepEqual(res, test.output) { + t.Errorf("Test %d: relabel output mismatch: expected %#v, got %#v", i+1, test.output, res) + } + } +} diff --git a/retrieval/target.go b/retrieval/target.go index d09dbeecb..cdd3acdca 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -172,10 +172,10 @@ type target struct { } // NewTarget creates a reasonably configured target for querying. -func NewTarget(address string, cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target { +func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target { t := &target{ url: &url.URL{ - Host: address, + Host: string(baseLabels[clientmodel.AddressLabel]), }, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), @@ -197,16 +197,16 @@ func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSe t.deadline = cfg.ScrapeTimeout() t.httpClient = utility.NewDeadlineClient(cfg.ScrapeTimeout()) - t.baseLabels = clientmodel.LabelSet{ - clientmodel.InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier()), - } - + t.baseLabels = clientmodel.LabelSet{} // All remaining internal labels will not be part of the label set. for name, val := range baseLabels { if !strings.HasPrefix(string(name), clientmodel.ReservedLabelPrefix) { t.baseLabels[name] = val } } + if _, ok := t.baseLabels[clientmodel.InstanceLabel]; !ok { + t.baseLabels[clientmodel.InstanceLabel] = clientmodel.LabelValue(t.InstanceIdentifier()) + } } func (t *target) String() string { diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 2427dc2cf..66d29bd23 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -81,19 +81,19 @@ func (tm *TargetManager) Run() { sources := map[string]struct{}{} for scfg, provs := range tm.providers { - for _, p := range provs { + for _, prov := range provs { ch := make(chan *config.TargetGroup) go tm.handleTargetUpdates(scfg, ch) - for _, src := range p.Sources() { + for _, src := range prov.Sources() { src = fullSource(scfg, src) sources[src] = struct{}{} } // Run the target provider after cleanup of the stale targets is done. - defer func(c chan *config.TargetGroup) { + defer func(p TargetProvider, c chan *config.TargetGroup) { go p.Run(c) - }(ch) + }(prov, ch) } } @@ -326,9 +326,17 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc } } - address, ok := labels[clientmodel.AddressLabel] - if !ok { - return nil, fmt.Errorf("Instance %d in target group %s has no address", i, tg) + if _, ok := labels[clientmodel.AddressLabel]; !ok { + return nil, fmt.Errorf("instance %d in target group %s has no address", i, tg) + } + + labels, err := Relabel(labels, cfg.RelabelConfigs()...) + if err != nil { + return nil, fmt.Errorf("error while relabelling instance %d in target group %s: %s", i, tg, err) + } + // Check if the target was dropped. 
+ if labels == nil { + continue } for ln := range labels { @@ -338,8 +346,8 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc delete(labels, ln) } } - targets = append(targets, NewTarget(string(address), cfg, labels)) - + tr := NewTarget(cfg, labels) + targets = append(targets, tr) } return targets, nil diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 1d8315ca2..6b539d1a7 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -164,10 +164,45 @@ func TestTargetManagerConfigUpdate(t *testing.T) { JobName: proto.String("test_job2"), ScrapeInterval: proto.String("1m"), TargetGroup: []*pb.TargetGroup{ - {Target: []string{"example.org:8080", "example.com:8081"}}, + { + Target: []string{"example.org:8080", "example.com:8081"}, + Labels: &pb.LabelPairs{Label: []*pb.LabelPair{ + {Name: proto.String("foo"), Value: proto.String("bar")}, + {Name: proto.String("boom"), Value: proto.String("box")}, + }}, + }, {Target: []string{"test.com:1234"}}, + { + Target: []string{"test.com:1235"}, + Labels: &pb.LabelPairs{Label: []*pb.LabelPair{ + {Name: proto.String("instance"), Value: proto.String("fixed")}, + }}, + }, + }, + RelabelConfig: []*pb.RelabelConfig{ + { + SourceLabel: []string{string(clientmodel.AddressLabel)}, + Regex: proto.String(`^test\.(.*?):(.*)`), + Replacement: proto.String("foo.${1}:${2}"), + TargetLabel: proto.String(string(clientmodel.AddressLabel)), + }, { + // Add a new label for example.* targets. + SourceLabel: []string{string(clientmodel.AddressLabel), "boom", "foo"}, + Regex: proto.String("^example.*?-b([a-z-]+)r$"), + TargetLabel: proto.String("new"), + Replacement: proto.String("$1"), + Separator: proto.String("-"), + }, { + // Drop an existing label. 
+				SourceLabel: []string{"boom"},
+				Regex:       proto.String(".*"),
+				TargetLabel: proto.String("boom"),
+				Replacement: proto.String(""),
+			},
 		},
 	}
+	proto.SetDefaults(testJob1)
+	proto.SetDefaults(testJob2)
 
 	sequence := []struct {
@@ -197,11 +232,14 @@
 				{clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.com:80"},
 			},
 			"test_job2:static:0": {
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080"},
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080", "foo": "bar", "new": "ox-ba"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081", "foo": "bar", "new": "ox-ba"},
 			},
 			"test_job2:static:1": {
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "test.com:1234"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "foo.com:1234"},
+			},
+			"test_job2:static:2": {
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "fixed"},
 			},
 		},
 	}, {
@@ -211,11 +249,14 @@
 			scrapeConfigs: []*pb.ScrapeConfig{testJob2},
 			expected: map[string][]clientmodel.LabelSet{
 				"test_job2:static:0": {
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080"},
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080", "foo": "bar", "new": "ox-ba"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.com:8081", "foo": "bar", "new": "ox-ba"},
 			},
 			"test_job2:static:1": {
-				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "test.com:1234"},
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "foo.com:1234"},
+			},
+			"test_job2:static:2": {
+				{clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "fixed"},
 			},
 		},
 	},

From b5a8f7b8fabe5a2b5213347c3006612fc52265b1 Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Thu, 30 Apr 2015 21:15:18 +0200
Subject: [PATCH 05/27] Cleanup, test, and document config.

---
 config/config.go                      | 10 ++-
 config/config_test.go                 |  5 +-
 config/fixtures/full.conf.input       | 89 +++++++++++++++++++++++++++
 config/fixtures/sd_targets.conf.input |  2 +
 config/load.go                        | 10 +--
 config/load_test.go                   |  2 +-
 retrieval/targetmanager.go            |  6 +-
 retrieval/targetmanager_test.go       |  4 +-
 rules/manager/manager.go              |  4 +-
 9 files changed, 114 insertions(+), 18 deletions(-)
 create mode 100644 config/fixtures/full.conf.input

diff --git a/config/config.go b/config/config.go
index 94f45fb0f..e8eafa67a 100644
--- a/config/config.go
+++ b/config/config.go
@@ -29,8 +29,10 @@ import (
 	pb "github.com/prometheus/prometheus/config/generated"
 )
 
-var jobNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$")
-var labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+var (
+	jobNameRE   = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$")
+	labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
 
 // Config encapsulates the configuration of a Prometheus instance. It wraps the
 // raw configuration protocol buffer to be able to add custom methods to it.
@@ -232,15 +234,17 @@ func (c *DNSConfig) Validate() error {
 	return nil
 }
 
-// SDRefreshInterval gets the the SD refresh interval for the scrape config.
+// RefreshInterval gets the refresh interval for DNS service discovery.
func (c *DNSConfig) RefreshInterval() time.Duration { return stringToDuration(c.GetRefreshInterval()) } +// RelabelConfig encapsulates the protobuf configuration object for relabeling. type RelabelConfig struct { pb.RelabelConfig } +// Validate checks the RelabelConfig for the validity of its fields. func (c *RelabelConfig) Validate() error { if len(c.GetSourceLabel()) == 0 { return errors.New("at least one source label is required") diff --git a/config/config_test.go b/config/config_test.go index 9955dc99f..80dfb3028 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -34,8 +34,9 @@ var configTests = []struct { inputFile: "empty.conf.input", }, { inputFile: "sd_targets.conf.input", - }, - { + }, { + inputFile: "full.conf.input", + }, { inputFile: "invalid_proto_format.conf.input", shouldFail: true, errContains: "unknown field name", diff --git a/config/fixtures/full.conf.input b/config/fixtures/full.conf.input new file mode 100644 index 000000000..5e5b34092 --- /dev/null +++ b/config/fixtures/full.conf.input @@ -0,0 +1,89 @@ +global < + scrape_interval: "30s" + evaluation_interval: "30s" + labels: < + label: < + name: "monitor" + value: "test" + > + label: < + name: "more" + value: "test" + > + > + rule_file: "prometheus.rules" + rule_file: "prometheus.more.rules" +> + +scrape_config: < + job_name: "prometheus" + scrape_interval: "15s" + metrics_path: "/metrics" + scheme: "http" + + target_group: < + target: "localhost:9090" + > +> + +scrape_config: < + job_name: "myjob" + scrape_interval: "15s" + metrics_path: "/metrics" + scheme: "http" + + dns_config: < + name: "first.srv.name" + name: "second.srv.name" + refresh_interval: "1h" + > + + dns_config: < + name: "first2.srv.name" + name: "second2.srv.name" + refresh_interval: "1m" + > + + relabel_config: < + source_label: "l1" + source_label: "l2" + regex: "^foobar.*$" + target_label: "l3" + replacement: "static" + > + relabel_config: < + source_label: "l4" + regex: "^foobar.*$" + action: DROP + > + relabel_config: < + source_label: "l4" + regex: "^foobar.*$" + action: KEEP + > + + target_group: < + target: "localhost:9090" + target: "localhost:9091" + labels: < + label: < + name: "tg1" + value: "tg1" + > + > + > + target_group: < + target: "my.domain:9090" + target: "my.domain:9091" + labels: < + label: < + name: "tg2" + value: "tg2" + > + label: < + name: "tg2_1" + value: "tg2_1" + > + > + > +> diff --git a/config/fixtures/sd_targets.conf.input b/config/fixtures/sd_targets.conf.input index 7b694db21..2f9acd68e 100644 --- a/config/fixtures/sd_targets.conf.input +++ b/config/fixtures/sd_targets.conf.input @@ -2,5 +2,7 @@ scrape_config: < job_name: "testjob" dns_config: < name: "sd_name" + name: "sd_name2" + refresh_interval: "15s" > > diff --git a/config/load.go b/config/load.go index 75b6b03f8..3ee7f8695 100644 --- a/config/load.go +++ b/config/load.go @@ -22,10 +22,10 @@ import ( ) // LoadFromString returns a config parsed from the provided string. 
-func LoadFromString(configStr string) (Config, error) { +func LoadFromString(configStr string) (*Config, error) { configProto := pb.PrometheusConfig{} if err := proto.UnmarshalText(configStr, &configProto); err != nil { - return Config{}, err + return nil, err } if configProto.Global == nil { configProto.Global = &pb.GlobalConfig{} @@ -36,17 +36,17 @@ func LoadFromString(configStr string) (Config, error) { } } - config := Config{configProto} + config := &Config{configProto} err := config.Validate() return config, err } // LoadFromFile returns a config parsed from the file of the provided name. -func LoadFromFile(fileName string) (Config, error) { +func LoadFromFile(fileName string) (*Config, error) { configStr, err := ioutil.ReadFile(fileName) if err != nil { - return Config{}, err + return nil, err } return LoadFromString(string(configStr)) diff --git a/config/load_test.go b/config/load_test.go index 0a086f300..5e1aa423f 100644 --- a/config/load_test.go +++ b/config/load_test.go @@ -20,6 +20,6 @@ import ( func TestLoadFromFile(t *testing.T) { _, err := LoadFromFile("file-does-not-exist.conf") if err == nil { - t.Error(err) + t.Error("Error expected on non-existing config file path but got none") } } diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 66d29bd23..6e9f29f49 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -63,7 +63,7 @@ type TargetManager struct { } // NewTargetManager creates a new TargetManager based on the given config. -func NewTargetManager(cfg config.Config, sampleAppender storage.SampleAppender) (*TargetManager, error) { +func NewTargetManager(cfg *config.Config, sampleAppender storage.SampleAppender) (*TargetManager, error) { tm := &TargetManager{ sampleAppender: sampleAppender, targets: make(map[string][]Target), @@ -273,7 +273,7 @@ func (tm *TargetManager) Pools() map[string][]Target { // ApplyConfig resets the manager's target providers and job configurations as defined // by the new cfg. The state of targets that are valid in the new configuration remains unchanged. -func (tm *TargetManager) ApplyConfig(cfg config.Config) error { +func (tm *TargetManager) ApplyConfig(cfg *config.Config) error { tm.stop(false) // Even if updating the config failed, we want to continue rather than stop scraping anything. defer tm.Run() @@ -285,7 +285,7 @@ func (tm *TargetManager) ApplyConfig(cfg config.Config) error { return nil } -func (tm *TargetManager) applyConfig(cfg config.Config) error { +func (tm *TargetManager) applyConfig(cfg *config.Config) error { // Only apply changes if everything was successful. 
providers := map[*config.ScrapeConfig][]TargetProvider{} diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 6b539d1a7..7aa090314 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -262,7 +262,7 @@ func TestTargetManagerConfigUpdate(t *testing.T) { }, } - targetManager, err := NewTargetManager(config.Config{}, nopAppender{}) + targetManager, err := NewTargetManager(&config.Config{}, nopAppender{}) if err != nil { t.Fatal(err) } @@ -273,7 +273,7 @@ func TestTargetManagerConfigUpdate(t *testing.T) { cfg := pb.PrometheusConfig{ ScrapeConfig: step.scrapeConfigs, } - err := targetManager.ApplyConfig(config.Config{cfg}) + err := targetManager.ApplyConfig(&config.Config{cfg}) if err != nil { t.Fatal(err) } diff --git a/rules/manager/manager.go b/rules/manager/manager.go index 6865e7df5..1e7e070f4 100644 --- a/rules/manager/manager.go +++ b/rules/manager/manager.go @@ -74,7 +74,7 @@ func init() { // NewRuleManager. type RuleManager interface { // Load and add rules from rule files specified in the configuration. - AddRulesFromConfig(config config.Config) error + AddRulesFromConfig(config *config.Config) error // Start the rule manager's periodic rule evaluation. Run() // Stop the rule manager's rule evaluation cycles. @@ -267,7 +267,7 @@ func (m *ruleManager) runIteration() { wg.Wait() } -func (m *ruleManager) AddRulesFromConfig(config config.Config) error { +func (m *ruleManager) AddRulesFromConfig(config *config.Config) error { for _, ruleFile := range config.Global.RuleFile { newRules, err := rules.LoadRulesFromFile(ruleFile) if err != nil { From 8f75ff0513d4a68948a98e4b5930a189bd1fd7ae Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Tue, 5 May 2015 14:04:41 +0200 Subject: [PATCH 06/27] Add warning about config changes. --- main.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/main.go b/main.go index 83cceb2ec..083d5b5af 100644 --- a/main.go +++ b/main.go @@ -92,7 +92,8 @@ type prometheus struct { func NewPrometheus() *prometheus { conf, err := config.LoadFromFile(*configFile) if err != nil { - glog.Errorf("Couldn't load configuration (-config.file=%s): %v\n", *configFile, err) + glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err) + glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.") os.Exit(2) } From 66ecc420ef3301042a3245a630f3f6dc137727a6 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Thu, 7 May 2015 11:06:27 +0200 Subject: [PATCH 07/27] Add gopkg.in/yaml.v2 godep. 
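
The vendored package is small in surface area; decoding works roughly as
in the following self-contained sketch. The struct and the document here
are illustrative only, not the eventual Prometheus configuration schema:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// exampleConfig is a hypothetical shape used only to demonstrate the API.
type exampleConfig struct {
	ScrapeInterval string   `yaml:"scrape_interval"`
	RuleFiles      []string `yaml:"rule_files"`
}

var doc = `
scrape_interval: 30s
rule_files:
  - prometheus.rules
`

func main() {
	var cfg exampleConfig
	// Unmarshal maps YAML keys onto struct fields via the yaml tags.
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("%+v\n", cfg) // {ScrapeInterval:30s RuleFiles:[prometheus.rules]}
}
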
--- Godeps/Godeps.json | 4 + .../_workspace/src/gopkg.in/yaml.v2/LICENSE | 188 ++ .../src/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + .../_workspace/src/gopkg.in/yaml.v2/README.md | 128 + .../_workspace/src/gopkg.in/yaml.v2/apic.go | 742 +++++ .../_workspace/src/gopkg.in/yaml.v2/decode.go | 683 +++++ .../src/gopkg.in/yaml.v2/decode_test.go | 966 ++++++ .../src/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ .../_workspace/src/gopkg.in/yaml.v2/encode.go | 306 ++ .../src/gopkg.in/yaml.v2/encode_test.go | 485 +++ .../src/gopkg.in/yaml.v2/parserc.go | 1096 +++++++ .../src/gopkg.in/yaml.v2/readerc.go | 391 +++ .../src/gopkg.in/yaml.v2/resolve.go | 203 ++ .../src/gopkg.in/yaml.v2/scannerc.go | 2710 +++++++++++++++++ .../_workspace/src/gopkg.in/yaml.v2/sorter.go | 104 + .../src/gopkg.in/yaml.v2/suite_test.go | 12 + .../src/gopkg.in/yaml.v2/writerc.go | 89 + .../_workspace/src/gopkg.in/yaml.v2/yaml.go | 344 +++ .../_workspace/src/gopkg.in/yaml.v2/yamlh.go | 716 +++++ .../src/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ 20 files changed, 11056 insertions(+) create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 1f3b26468..e32bd4590 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -63,6 +63,10 @@ { "ImportPath": "github.com/syndtr/gosnappy/snappy", "Rev": "156a073208e131d7d2e212cb749feae7c339e846" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213" } ] } diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..a68e67f01 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. 
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..d6c919e60 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md @@ -0,0 +1,128 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct{C int; D []int ",flow"} +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..95ec014e8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
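+	// The queue is a slice with a moving head index: once the slice is at
+	// capacity and tokens have already been consumed from the front, the
+	// live tokens are copied back to index 0 so the existing capacity can
+	// be reused instead of growing the slice.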
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. 
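+// Like the parser-side setters above, this may only be called once;
+// a second call panics.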
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. 
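+// The implicit flag records whether the document start marker ("---")
+// is implied rather than spelled out explicitly.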
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. 
+// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..085cddc44 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
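+//
+// A minimal sketch of the intended flow, using only names defined below:
+//
+//	p := newParser([]byte("a: 1"))
+//	defer p.destroy()
+//	root := p.parse() // the document node, or nil for an empty stream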
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
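+//
+// A minimal sketch, assuming root is a document *node from the parser
+// above and target is any addressable Go value:
+//
+//	d := newDecoder()
+//	ok := d.unmarshal(root, reflect.ValueOf(&target).Elem())
+//	// type mismatches do not stop decoding; they accumulate in d.terrors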
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
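+			// Allocate a fresh value of the pointee type, fill it with the
+			// resolved scalar, and point out at it.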
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
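+			// A "<<" merge key expands into out directly; there is no
+			// ordinary field to assign for this pair.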
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 000000000..04fdd9e72 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,966 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
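+	// (NaN decoding is instead covered by TestUnmarshalNaN below, which
+	// checks with math.IsNaN rather than equality.)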
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
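+	// (A key matches the struct field whose lowercased name equals it; an
+	// explicit tag, as in the `B int "a"` case below, overrides this, and
+	// unmatched keys are silently ignored.)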
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. 
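+	// (An explicit [] yields a non-nil empty slice, unlike a null value,
+	// which zeroes the field to a nil slice.)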
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, 
NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 
10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[interface{}]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+var unmarshalNullTests = []func() interface{}{
+ func() interface{} { var v interface{}; v = "v"; return &v },
+ func() interface{} { var s = "s"; return &s },
+ func() interface{} { var s = "s"; sptr := &s; return &sptr },
+ func() interface{} { var i = 1; return &i },
+ func() interface{} { var i = 1; iptr := &i; return &iptr },
+ func() interface{} { m := map[string]int{"s": 1}; return &m },
+ func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+ for _, test := range unmarshalNullTests {
+ item := test()
+ zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+ err := yaml.Unmarshal([]byte("null"), item)
+ c.Assert(err, IsNil)
+ if reflect.TypeOf(item).Kind() == reflect.Map {
+ c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+ } else {
+ c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalSliceOnPreset(c *C) {
+ // Issue #48.
+ v := struct{ A []int }{[]int{1}}
+ yaml.Unmarshal([]byte("a: [2]"), &v)
+ c.Assert(v.A, DeepEquals, []int{2})
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 000000000..9b3dc4a43
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
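+//
+// The buffer is flushed once fewer than five free bytes remain, leaving room
+// for the widest single write: a four-byte UTF-8 sequence or a CRLF break.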
+func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
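+//
+// The emitter buffers a short run of events so that later decisions (such as
+// emitting an empty sequence or mapping in flow style) can look ahead at what
+// follows.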
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
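+//
+// Each state handler expects one particular kind of event, writes whatever
+// output that event implies, and selects the follow-up state.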
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
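+//
+// DOCUMENT-START writes any %YAML and %TAG directives and decides whether
+// the "---" marker may stay implicit; STREAM-END flushes and terminates.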
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
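+ // An explicit document end is marked by "..." on a line of its own.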
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
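+//
+// With simple set, the key was written inline and only ":" must follow;
+// otherwise the value belongs to a "?"-style complex key and ":" is written
+// as a separate indicator.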
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
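+//
+// Dispatches on the event type: only ALIAS, SCALAR, SEQUENCE-START, and
+// MAPPING-START may appear in node position.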
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
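+//
+// Like the sequence check above, this inspects buffered events rather than
+// any output, which is why the emitter accumulates events ahead of time.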
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
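+//
+// A tag matching a %TAG directive is written as handle plus suffix;
+// otherwise it is written verbatim as "!<...>".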
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
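+//
+// Anchor and alias names must be non-empty and purely alphanumerical.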
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceeded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceeded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceeded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
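+//
+// Besides validating, this splits the event into the anchor, tag, and scalar
+// data consumed by the process_* functions above.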
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..84f849955 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 000000000..ba68ad291 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,485 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
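+	// (Unquoted, a plain "b: c" would parse back as a nested mapping, so the
+	// emitter must fall back to a quoted style here.)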
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before 
%#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..0a7037ad1 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
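+					// Matched the tag handle: the directive's prefix plus the
+					// node's suffix forms the full resolved tag.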
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
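+// Empty scalars stand in for nodes that the grammar allows to be omitted,
+// e.g. the missing value of a key with nothing after the ':'.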
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..d5fb09727 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,391 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
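+// Matched against the first bytes of the stream to detect its encoding.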
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning. 
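+	// (Reslicing to full capacity lets the decoding loop below write bytes
+	// into the buffer directly.)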
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
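+			// In short: most characters take two bytes, but values above
+			// U+FFFF need four (a surrogate pair); U+1F600, for instance,
+			// travels as the pair 0xD83D 0xDE00.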
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + } + buffer_len += width + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..93a863274 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
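+		// (The map covers the boolean and null spellings, the .inf/.nan
+		// variants, and the "<<" merge key.)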
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 000000000..fe93b190c
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called). 
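+// ("LL(1)" simply means the parser decides what to do next by looking at a
+// single token of lookahead.)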
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2.
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
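+
+// An illustrative aside (not part of the library): yaml_parser_scan
+// above consumes tokens by advancing tokens_head rather than by
+// reslicing, so the queue behaves like this hypothetical miniature:
+//
+//	type queue struct {
+//		items []yaml_token_t
+//		head  int
+//	}
+//
+//	func (q *queue) pop() (yaml_token_t, bool) {
+//		if q.head == len(q.items) {
+//			return yaml_token_t{}, false // empty: fetch more tokens first
+//		}
+//		t := q.items[q.head]
+//		q.head++
+//		return t, true
+//	}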
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning. Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters. 4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.'
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
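+
+// An illustrative aside (not part of the library): the staleness rule
+// used in yaml_parser_stale_simple_keys above, in isolation. A recorded
+// simple-key candidate dies once the scanner leaves its line or moves
+// more than 1024 characters past its start:
+//
+//	func stale(key yaml_simple_key_t, mark yaml_mark_t) bool {
+//		return key.mark.line < mark.line ||
+//			key.mark.index+1024 < mark.index
+//	}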
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
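+
+	// An illustrative aside (not part of the library): the indent
+	// bookkeeping above in miniature. Rolling pushes the old level and
+	// emits one BLOCK-*-START; unrolling pops until the column fits,
+	// emitting one BLOCK-END per popped level:
+	//
+	//	indents, indent := []int{}, -1
+	//	roll := func(column int) {
+	//		if indent < column {
+	//			indents = append(indents, indent)
+	//			indent = column // BLOCK-SEQUENCE/MAPPING-START goes here
+	//		}
+	//	}
+	//	unroll := func(column int) {
+	//		for indent > column {
+	//			indent = indents[len(indents)-1]
+	//			indents = indents[:len(indents)-1] // emit one BLOCK-END
+	//		}
+	//	}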
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
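+
+// An illustrative aside (not part of the library): what fetch_value
+// does with a pending simple key, reduced to slices. The KEY token is
+// spliced in retroactively at the position recorded when the scalar
+// was queued:
+//
+//	tokens := []string{"BLOCK-MAPPING-START", "SCALAR"}
+//	keyPos := 1 // simple_key.token_number - tokens_parsed
+//	tokens = append(tokens, "")
+//	copy(tokens[keyPos+1:], tokens[keyPos:])
+//	tokens[keyPos] = "KEY"
+//	tokens = append(tokens, "VALUE")
+//	// tokens: BLOCK-MAPPING-START KEY SCALAR VALUE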
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
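+//
+// An illustrative aside (not part of this file; comp and errTooLong are
+// hypothetical names): the hand-rolled digit loop below is equivalent
+// to this sketch, except that it reads from the parser buffer:
+//
+//	value := int8(0)
+//	for i := 0; i < len(comp); i++ { // comp is "1" from "%YAML 1.1"
+//		if i == max_number_length {
+//			return errTooLong // "found extremely long version number"
+//		}
+//		value = value*10 + int8(comp[i]-'0')
+//	}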
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
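+
+	// An illustrative aside (not part of the library; s is a
+	// hypothetical 3-byte window like "%C3"): decoding one URI-escaped
+	// octet, as the code below does with is_hex/as_hex. For "%C3%A9"
+	// the two decoded octets form the UTF-8 sequence for 'é':
+	//
+	//	hex := func(c byte) byte { // assumes a valid hex digit
+	//		switch {
+	//		case c >= 'a':
+	//			return c - 'a' + 10
+	//		case c >= 'A':
+	//			return c - 'A' + 10
+	//		}
+	//		return c - '0'
+	//	}
+	//	octet := hex(s[1])<<4 + hex(s[2]) // s == "%C3" -> 0xC3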
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the intendation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + + // Get the intendation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an intendation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
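+
+	// An illustrative aside (not part of the library): the header logic
+	// above accepts an optional chomping sign and an optional non-zero
+	// indentation digit in either order, so "|+2" and "|2+" mean the
+	// same thing. Simplified (the real code takes at most one of each):
+	//
+	//	chomping, increment := 0, 0
+	//	for _, c := range header { // header is "+2", "2+", "-", or ""
+	//		switch {
+	//		case c == '+':
+	//			chomping = +1
+	//		case c == '-':
+	//			chomping = -1
+	//		case c == '0':
+	//			return errZeroIndent // hypothetical; indicator must not be 0
+	//		default: // '1'..'9'
+	//			increment = int(c - '0')
+	//		}
+	//	}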
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
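+		// While the indentation is still undetermined (*indent == 0), all
+		// leading spaces count; the longest run seen across the leading
+		// empty lines is kept in max_indent and becomes the scalar's
+		// indentation below.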
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
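+				// Single-character escapes are expanded right here; 'x',
+				// 'u', and 'U' only record how many hex digits follow
+				// (2, 4, or 8), e.g. "\u00E9" denotes 'é'.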
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
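+				// The first line break is kept separate from any further
+				// (trailing) breaks: folding joins two lines with a single
+				// space only when no trailing breaks follow.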
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
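+	// If the scalar ended at a line break, the next token starts on a new
+	// line, where a simple key may begin again.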
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..5958822f9 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go new file mode 100644 index 000000000..c5cf1ed4f --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . 
"gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..190362f25 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..af4df8a42 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,344 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. 
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField()-1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..d60a6b6b0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out]   data     A pointer to an application data specified by
+//                     yaml_emitter_set_output().
+// [in]       buffer   The buffer with bytes to be written.
+// [in]       size     The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 000000000..8110ce3c3
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
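+	// (A factor of 3 comfortably covers the worst-case expansion when
+	// recoding UTF-16 input to UTF-8.)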
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the buffer contains a BOM at the specified position.
+func is_bom(b []byte, i int) bool {
+	return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
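+// Besides CR and LF this also matches the multi-byte NEL, LS, and PS
+// breaks that YAML recognizes.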
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} From 5fbde88919eceb18dc5ebbc2696b3da237073af8 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Thu, 7 May 2015 10:55:03 +0200 Subject: [PATCH 08/27] Switch config to YAML format. 
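
The configuration file format changes from protocol buffer text format to
YAML, parsed with the newly vendored gopkg.in/yaml.v2 package. Load and
LoadFromFile replace the LoadFromString/LoadFromFile pair from the deleted
load.go; defaults are applied through the Defaulted* proxy types, and
validation of job names, label names, and regexes now happens inside the
respective UnmarshalYAML methods rather than in a separate Validate pass.

Roughly how the new API is meant to be used (a minimal sketch; the example
program and the inline configuration are illustrative only, not files from
this repository):

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/prometheus/prometheus/config"
    )

    func main() {
    	// Parse a YAML configuration string; loading from disk works the
    	// same way via config.LoadFromFile("prometheus.yml").
    	cfg, err := config.Load(`
    global_config:
      scrape_interval: 15s
    scrape_configs:
      - job_name: prometheus
        target_groups:
          - targets: ['localhost:9090']
    `)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, sc := range cfg.ScrapeConfigs {
    		// Scrape intervals and timeouts left unset on a job have been
    		// filled in from the global section during unmarshaling.
    		fmt.Println(sc.JobName, time.Duration(sc.ScrapeInterval))
    	}
    }
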
--- .../client_golang/model/labelname.go | 17 + Makefile | 11 +- config/Makefile | 22 - config/config.go | 544 ++++++++++-------- config/config.proto | 117 ---- config/config_test.go | 194 +++++-- config/fixtures/empty.conf.input | 0 config/fixtures/full.conf.input | 89 --- config/fixtures/invalid_job_name.conf.input | 3 - config/fixtures/invalid_label_name.conf.input | 10 - .../fixtures/invalid_proto_format.conf.input | 11 - .../invalid_scrape_interval.conf.input | 10 - config/fixtures/minimal.conf.input | 22 - config/fixtures/repeated_job_name.conf.input | 11 - config/fixtures/sample.conf.input | 57 -- config/fixtures/sd_targets.conf.input | 8 - config/generated/config.pb.go | 406 ------------- config/load.go | 53 -- config/load_test.go | 25 - config/testdata/conf.good.yml | 61 ++ config/testdata/jobname.bad.yml | 2 + config/testdata/jobname_dup.bad.yml | 5 + config/testdata/labelname.bad.yml | 3 + config/testdata/regex.bad.yml | 4 + main.go | 2 +- retrieval/relabel.go | 39 +- retrieval/relabel_test.go | 102 ++-- retrieval/target.go | 8 +- retrieval/target_test.go | 8 - retrieval/targetmanager.go | 46 +- retrieval/targetmanager_test.go | 139 +++-- rules/manager/manager.go | 2 +- 32 files changed, 712 insertions(+), 1319 deletions(-) delete mode 100644 config/Makefile delete mode 100644 config/config.proto delete mode 100644 config/fixtures/empty.conf.input delete mode 100644 config/fixtures/full.conf.input delete mode 100644 config/fixtures/invalid_job_name.conf.input delete mode 100644 config/fixtures/invalid_label_name.conf.input delete mode 100644 config/fixtures/invalid_proto_format.conf.input delete mode 100644 config/fixtures/invalid_scrape_interval.conf.input delete mode 100644 config/fixtures/minimal.conf.input delete mode 100644 config/fixtures/repeated_job_name.conf.input delete mode 100644 config/fixtures/sample.conf.input delete mode 100644 config/fixtures/sd_targets.conf.input delete mode 100644 config/generated/config.pb.go delete mode 100644 config/load.go delete mode 100644 config/load_test.go create mode 100644 config/testdata/conf.good.yml create mode 100644 config/testdata/jobname.bad.yml create mode 100644 config/testdata/jobname_dup.bad.yml create mode 100644 config/testdata/labelname.bad.yml create mode 100644 config/testdata/regex.bad.yml diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go index 5ea4258aa..36770f70a 100644 --- a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go @@ -14,6 +14,8 @@ package model import ( + "fmt" + "regexp" "strings" ) @@ -58,10 +60,25 @@ const ( QuantileLabel = "quantile" ) +var labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + // A LabelName is a key for a LabelSet or Metric. It has a value associated // therewith. type LabelName string +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !labelNameRE.MatchString(s) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + // LabelNames is a sortable LabelName slice. In implements sort.Interface. 
type LabelNames []LabelName diff --git a/Makefile b/Makefile index f17aad623..bd732e049 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ advice: $(GOCC) binary: build -build: config tools web $(GOPATH) +build: tools web $(GOPATH) $(GO) build -o prometheus $(BUILDFLAGS) . docker: build @@ -49,7 +49,7 @@ tag: $(BUILD_PATH)/cache/$(GOPKG): $(CURL) -o $@ -L $(GOURL)/$(GOPKG) -benchmark: config dependencies tools web +benchmark: dependencies tools web $(GO) test $(GO_TEST_FLAGS) -test.run='NONE' -test.bench='.*' -test.benchmem ./... | tee benchmark.txt clean: @@ -62,9 +62,6 @@ clean: -find . -type f -name '*#' -exec rm '{}' ';' -find . -type f -name '.#*' -exec rm '{}' ';' -config: - $(MAKE) -C config - $(SELFLINK): $(GOPATH) ln -s $(MAKEFILE_DIR) $@ @@ -91,7 +88,7 @@ run: binary search_index: godoc -index -write_index -index_files='search_index' -test: config dependencies tools web +test: dependencies tools web $(GO) test $(GO_TEST_FLAGS) ./... tools: dependencies @@ -103,4 +100,4 @@ web: dependencies rules: dependencies $(MAKE) -C rules -.PHONY: advice binary build clean config dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools +.PHONY: advice binary build clean dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools diff --git a/config/Makefile b/config/Makefile deleted file mode 100644 index 1a71d02b9..000000000 --- a/config/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2013 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -all: generated/config.pb.go - -SUFFIXES: - -include ../Makefile.INCLUDE - -generated/config.pb.go: config.proto - go get github.com/golang/protobuf/protoc-gen-go - $(PROTOC) --proto_path=$(PREFIX)/include:. --go_out=generated/ config.proto diff --git a/config/config.go b/config/config.go index e8eafa67a..1c501b66f 100644 --- a/config/config.go +++ b/config/config.go @@ -1,269 +1,359 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package config import ( - "errors" "fmt" + "io/ioutil" "regexp" "strings" "time" - "github.com/golang/protobuf/proto" + "gopkg.in/yaml.v2" clientmodel "github.com/prometheus/client_golang/model" "github.com/prometheus/prometheus/utility" - - pb "github.com/prometheus/prometheus/config/generated" ) -var ( - jobNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$") - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) +var jobNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$") -// Config encapsulates the configuration of a Prometheus instance. It wraps the -// raw configuration protocol buffer to be able to add custom methods to it. -type Config struct { - // The protobuf containing the actual configuration values. - pb.PrometheusConfig -} - -// String returns an ASCII serialization of the loaded configuration protobuf. -func (c *Config) String() string { - return proto.MarshalTextString(&c.PrometheusConfig) -} - -// validateLabels validates whether label names have the correct format. -func validateLabels(labels *pb.LabelPairs) error { - if labels == nil { - return nil +// Load parses the YAML input s into a Config. +func Load(s string) (*Config, error) { + cfg := &Config{ + original: s, } - for _, label := range labels.Label { - if !labelNameRE.MatchString(label.GetName()) { - return fmt.Errorf("invalid label name '%s'", label.GetName()) - } - } - return nil -} - -// validateHosts validates whether a target group contains valid hosts. -func validateHosts(hosts []string) error { - if hosts == nil { - return nil - } - for _, host := range hosts { - // Make sure that this does not contain any paths or schemes. - // This ensures that old configurations error. - if strings.Contains(host, "/") { - return fmt.Errorf("invalid host '%s', no schemes or paths allowed", host) - } - } - return nil -} - -// Validate checks an entire parsed Config for the validity of its fields. -func (c *Config) Validate() error { - // Check the global configuration section for validity. - global := c.Global - if _, err := utility.StringToDuration(global.GetScrapeInterval()); err != nil { - return fmt.Errorf("invalid global scrape interval: %s", err) - } - if _, err := utility.StringToDuration(global.GetEvaluationInterval()); err != nil { - return fmt.Errorf("invalid rule evaluation interval: %s", err) - } - if err := validateLabels(global.Labels); err != nil { - return fmt.Errorf("invalid global labels: %s", err) - } - - // Check each scrape configuration for validity. - jobNames := map[string]struct{}{} - for _, sc := range c.ScrapeConfigs() { - name := sc.GetJobName() - - if _, ok := jobNames[name]; ok { - return fmt.Errorf("found multiple scrape configs configured with the same job name: %q", name) - } - jobNames[name] = struct{}{} - - if err := sc.Validate(); err != nil { - return fmt.Errorf("error in scrape config %q: %s", name, err) - } - } - - return nil -} - -// GlobalLabels returns the global labels as a LabelSet. -func (c *Config) GlobalLabels() clientmodel.LabelSet { - labels := clientmodel.LabelSet{} - if c.Global != nil && c.Global.Labels != nil { - for _, label := range c.Global.Labels.Label { - labels[clientmodel.LabelName(label.GetName())] = clientmodel.LabelValue(label.GetValue()) - } - } - return labels -} - -// ScrapeConfigs returns all scrape configurations. 
-func (c *Config) ScrapeConfigs() (cfgs []*ScrapeConfig) {
-	for _, sc := range c.GetScrapeConfig() {
-		cfgs = append(cfgs, &ScrapeConfig{*sc})
-	}
-	return
-}
-
-// stringToDuration converts a string to a duration and dies on invalid format.
-func stringToDuration(intervalStr string) time.Duration {
-	duration, err := utility.StringToDuration(intervalStr)
+	err := yaml.Unmarshal([]byte(s), cfg)
 	if err != nil {
-		panic(err)
+		return nil, err
 	}
-	return duration
+	return cfg, nil
 }
 
-// ScrapeInterval gets the default scrape interval for a Config.
-func (c *Config) ScrapeInterval() time.Duration {
-	return stringToDuration(c.Global.GetScrapeInterval())
+// LoadFromFile parses the given YAML file into a Config.
+func LoadFromFile(filename string) (*Config, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return Load(string(content))
 }
 
-// EvaluationInterval gets the default evaluation interval for a Config.
-func (c *Config) EvaluationInterval() time.Duration {
-	return stringToDuration(c.Global.GetEvaluationInterval())
+// The defaults applied before parsing the respective config sections.
+var (
+	// The default top-level configuration.
+	DefaultConfig = DefaultedConfig{
+		GlobalConfig: &GlobalConfig{DefaultGlobalConfig},
+	}
+
+	// The default global configuration.
+	DefaultGlobalConfig = DefaultedGlobalConfig{
+		ScrapeInterval:     Duration(10 * time.Second),
+		ScrapeTimeout:      Duration(10 * time.Second),
+		EvaluationInterval: Duration(1 * time.Minute),
+	}
+
+	// The default scrape configuration.
+	DefaultScrapeConfig = DefaultedScrapeConfig{
+		// ScrapeTimeout and ScrapeInterval default to the
+		// configured globals.
+		MetricsPath: "/metrics",
+		Scheme:      "http",
+	}
+
+	// The default relabel configuration.
+	DefaultRelabelConfig = DefaultedRelabelConfig{
+		Action:    RelabelReplace,
+		Separator: ";",
+	}
+
+	// The default DNS SD configuration.
+	DefaultDNSConfig = DefaultedDNSConfig{
+		RefreshInterval: Duration(30 * time.Second),
+	}
+)
+
+// Config is the top-level configuration for Prometheus's config files.
+type Config struct {
+	// DefaultedConfig contains the actual fields of Config.
+	DefaultedConfig `yaml:",inline"`
+
+	// original is the input from which the config was parsed.
+	original string
 }
 
-// ScrapeConfig encapsulates a protobuf scrape configuration.
+func (c *Config) String() string {
+	if c.original != "" {
+		return c.original
+	}
+	b, err := yaml.Marshal(c)
+	if err != nil {
+		return fmt.Sprintf("<error creating config string: %s>", err)
+	}
+	return string(b)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	c.DefaultedConfig = DefaultConfig
+	if err := unmarshal(&c.DefaultedConfig); err != nil {
+		return err
+	}
+	// Do global overrides and validate unique names.
+	jobNames := map[string]struct{}{}
+	for _, scfg := range c.ScrapeConfigs {
+		if scfg.ScrapeInterval == 0 {
+			scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval
+		}
+		if scfg.ScrapeTimeout == 0 {
+			scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout
+		}
+
+		if _, ok := jobNames[scfg.JobName]; ok {
+			return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
+		}
+		jobNames[scfg.JobName] = struct{}{}
+	}
+	return nil
+}
+
+// DefaultedConfig is a proxy type for Config.
+type DefaultedConfig struct {
+	GlobalConfig  *GlobalConfig   `yaml:"global_config"`
+	RuleFiles     []string        `yaml:"rule_files,omitempty"`
+	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
+}
+
+// GlobalConfig configures values that are used across other configuration
+// objects.
+type GlobalConfig struct {
+	// DefaultedGlobalConfig contains the actual fields for GlobalConfig.
+	DefaultedGlobalConfig `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	c.DefaultedGlobalConfig = DefaultGlobalConfig
+	if err := unmarshal(&c.DefaultedGlobalConfig); err != nil {
+		return err
+	}
+	return nil
+}
+
+// DefaultedGlobalConfig is a proxy type for GlobalConfig.
+type DefaultedGlobalConfig struct {
+	// How frequently to scrape targets by default.
+	ScrapeInterval Duration `yaml:"scrape_interval"`
+	// The default timeout when scraping targets.
+	ScrapeTimeout Duration `yaml:"scrape_timeout"`
+	// How frequently to evaluate rules by default.
+	EvaluationInterval Duration `yaml:"evaluation_interval"`
+
+	// The labels to add to any timeseries that this Prometheus instance scrapes.
+	Labels clientmodel.LabelSet `yaml:"labels,omitempty"`
+}
+
+// ScrapeConfig configures a scraping unit for Prometheus.
 type ScrapeConfig struct {
-	pb.ScrapeConfig
+	// DefaultedScrapeConfig contains the actual fields for ScrapeConfig.
+	DefaultedScrapeConfig `yaml:",inline"`
 }
 
-// ScrapeInterval gets the scrape interval for the scrape config.
-func (c *ScrapeConfig) ScrapeInterval() time.Duration {
-	return stringToDuration(c.GetScrapeInterval())
-}
-
-// ScrapeTimeout gets the scrape timeout for the scrape config.
-func (c *ScrapeConfig) ScrapeTimeout() time.Duration {
-	return stringToDuration(c.GetScrapeTimeout())
-}
-
-// Labels returns a label set for the targets that is implied by the scrape config.
-func (c *ScrapeConfig) Labels() clientmodel.LabelSet {
-	return clientmodel.LabelSet{
-		clientmodel.MetricsPathLabel: clientmodel.LabelValue(c.GetMetricsPath()),
-		clientmodel.JobLabel:         clientmodel.LabelValue(c.GetJobName()),
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + c.DefaultedScrapeConfig = DefaultScrapeConfig + err := unmarshal(&c.DefaultedScrapeConfig) + if err != nil { + return err } -} - -// Validate checks the ScrapeConfig for the validity of its fields -func (c *ScrapeConfig) Validate() error { - name := c.GetJobName() - - if !jobNameRE.MatchString(name) { - return fmt.Errorf("invalid job name %q", name) - } - if _, err := utility.StringToDuration(c.GetScrapeInterval()); err != nil { - return fmt.Errorf("invalid scrape interval: %s", err) - } - if _, err := utility.StringToDuration(c.GetScrapeTimeout()); err != nil { - return fmt.Errorf("invalid scrape timeout: %s", err) - } - for _, tgroup := range c.GetTargetGroup() { - if err := validateLabels(tgroup.Labels); err != nil { - return fmt.Errorf("invalid labels: %s", err) - } - if err := validateHosts(tgroup.Target); err != nil { - return fmt.Errorf("invalid targets: %s", err) - } - } - for _, dnscfg := range c.DNSConfigs() { - if err := dnscfg.Validate(); err != nil { - return fmt.Errorf("invalid DNS config: %s", err) - } - } - for _, rlcfg := range c.RelabelConfigs() { - if err := rlcfg.Validate(); err != nil { - return fmt.Errorf("invalid relabelling config: %s", err) - } + if !jobNameRE.MatchString(c.JobName) { + return fmt.Errorf("%q is not a valid job name", c.JobName) } return nil } -// DNSConfigs returns the list of DNS service discovery configurations -// for the scrape config. -func (c *ScrapeConfig) DNSConfigs() []*DNSConfig { - var dnscfgs []*DNSConfig - for _, dc := range c.GetDnsConfig() { - dnscfgs = append(dnscfgs, &DNSConfig{*dc}) - } - return dnscfgs +// DefaultedScrapeConfig is a proxy type for ScrapeConfig. +type DefaultedScrapeConfig struct { + // The job name to which the job label is set by default. + JobName string `yaml:"job_name"` + // How frequently to scrape the targets of this scrape config. + ScrapeInterval Duration `yaml:"scrape_interval"` + // The timeout for scraping targets of this config. + ScrapeTimeout Duration `yaml:"scrape_timeout"` + // The HTTP resource path on which to fetch metrics from targets. + MetricsPath string `yaml:"metrics_path"` + // The URL scheme with which to fetch metrics from targets. + Scheme string `yaml:"scheme"` + + // List of labeled target groups for this job. + TargetGroups []*TargetGroup `yaml:"target_groups,omitempty"` + // List of DNS service discovery configurations. + DNSConfigs []*DNSConfig `yaml:"dns_configs,omitempty"` + // List of relabel configurations. + RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` } -// RelabelConfigs returns the relabel configs of the scrape config. -func (c *ScrapeConfig) RelabelConfigs() []*RelabelConfig { - var rlcfgs []*RelabelConfig - for _, rc := range c.GetRelabelConfig() { - rlcfgs = append(rlcfgs, &RelabelConfig{*rc}) - } - return rlcfgs -} - -// DNSConfig encapsulates the protobuf configuration object for DNS based -// service discovery. -type DNSConfig struct { - pb.DNSConfig -} - -// Validate checks the DNSConfig for the validity of its fields. -func (c *DNSConfig) Validate() error { - if _, err := utility.StringToDuration(c.GetRefreshInterval()); err != nil { - return fmt.Errorf("invalid refresh interval: %s", err) - } - return nil -} - -// RefreshInterval gets the the refresh interval for DNS service discovery. 
-func (c *DNSConfig) RefreshInterval() time.Duration {
-	return stringToDuration(c.GetRefreshInterval())
-}
-
-// RelabelConfig encapsulates the protobuf configuration object for relabeling.
-type RelabelConfig struct {
-	pb.RelabelConfig
-}
-
-// Validate checks the RelabelConfig for the validity of its fields.
-func (c *RelabelConfig) Validate() error {
-	if len(c.GetSourceLabel()) == 0 {
-		return errors.New("at least one source label is required")
-	}
-	return nil
-}
-
-// TargetGroup is derived from a protobuf TargetGroup and attaches a source to it
-// that identifies the origin of the group.
+// A labeled group of targets to scrape for a job.
 type TargetGroup struct {
-	// Source is an identifier that describes a group of targets.
-	Source string
-	// Labels is a set of labels that is common across all targets in the group.
-	Labels clientmodel.LabelSet
 	// Targets is a list of targets identified by a label set. Each target is
 	// uniquely identifiable in the group by its address label.
-	Targets []clientmodel.LabelSet
+	Targets []clientmodel.LabelSet `yaml:"targets,omitempty" json:"targets,omitempty"`
+	// Labels is a set of labels that is common across all targets in the group.
+	Labels clientmodel.LabelSet `yaml:"labels,omitempty" json:"labels,omitempty"`
+
+	// Source is an identifier that describes a group of targets.
+	Source string `yaml:"-" json:"-"`
 }
 
 func (tg *TargetGroup) String() string {
 	return tg.Source
 }
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (tg *TargetGroup) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	g := struct {
+		Targets []string             `yaml:"targets"`
+		Labels  clientmodel.LabelSet `yaml:"labels"`
+	}{}
+	if err := unmarshal(&g); err != nil {
+		return err
+	}
+	tg.Targets = make([]clientmodel.LabelSet, 0, len(g.Targets))
+	for _, t := range g.Targets {
+		if strings.Contains(t, "/") {
+			return fmt.Errorf("%q is not a valid hostname", t)
+		}
+		tg.Targets = append(tg.Targets, clientmodel.LabelSet{
+			clientmodel.AddressLabel: clientmodel.LabelValue(t),
+		})
+	}
+	tg.Labels = g.Labels
+	return nil
+}
+
+// DNSConfig is the configuration for DNS based service discovery.
+type DNSConfig struct {
+	// DefaultedDNSConfig contains the actual fields for DNSConfig.
+	DefaultedDNSConfig `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (c *DNSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	c.DefaultedDNSConfig = DefaultDNSConfig
+	err := unmarshal(&c.DefaultedDNSConfig)
+	if err != nil {
+		return err
+	}
+	if len(c.Names) == 0 {
+		return fmt.Errorf("DNS config must contain at least one SRV server name")
+	}
+	return nil
+}
+
+// DefaultedDNSConfig is a proxy type for DNSConfig.
+type DefaultedDNSConfig struct {
+	Names           []string `yaml:"names"`
+	RefreshInterval Duration `yaml:"refresh_interval"`
+}
+
+// RelabelAction is the action to be performed on relabeling.
+type RelabelAction string
+
+const (
+	// Performs a regex replacement.
+	RelabelReplace RelabelAction = "replace"
+	// Drops targets for which the input does not match the regex.
+	RelabelKeep = "keep"
+	// Drops targets for which the input does match the regex.
+	RelabelDrop = "drop"
+)
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (a *RelabelAction) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch act := RelabelAction(strings.ToLower(s)); act { + case RelabelReplace, RelabelKeep, RelabelDrop: + *a = act + return nil + } + return fmt.Errorf("unknown relabel action %q", s) +} + +// RelabelConfig is the configuration for relabeling of target label sets. +type RelabelConfig struct { + // DefaultedRelabelConfig contains the actual fields for RelabelConfig. + DefaultedRelabelConfig `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + c.DefaultedRelabelConfig = DefaultRelabelConfig + return unmarshal(&c.DefaultedRelabelConfig) +} + +// DefaultedRelabelConfig is a proxy type for RelabelConfig. +type DefaultedRelabelConfig struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabels clientmodel.LabelNames `yaml:"source_labels,flow"` + // Separator is the string between concatenated values from the source labels. + Separator string `yaml:"separator"` + // Regex against which the concatenation is matched. + Regex *Regexp `yaml:"regex"` + // The label to which the resulting string is written in a replacement. + TargetLabel clientmodel.LabelName `yaml:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement string `yaml:"replacement,omitempty"` + // Action is the action to be performed for the relabeling. + Action RelabelAction `yaml:"action"` +} + +// Regexp encapsulates a regexp.Regexp and makes it YAML marshallable. +type Regexp struct { + regexp.Regexp +} + +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + regex, err := regexp.Compile(s) + if err != nil { + return err + } + re.Regexp = *regex + return nil +} + +// MarshalYAML implements the yaml.Marshaller interface. +func (re *Regexp) MarshalYAML() (interface{}, error) { + return re.String(), nil +} + +// Duration encapsulates a time.Duration and makes it YAML marshallable. +// +// TODO(fabxc): Since we have custom types for most things, including timestamps, +// we might want to move this into our model as well, eventually. +type Duration time.Duration + +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := utility.StringToDuration(s) + if err != nil { + return err + } + *d = Duration(dur) + return nil +} + +// MarshalYAML implements the yaml.Marshaller interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return utility.DurationToString(time.Duration(d)), nil +} diff --git a/config/config.proto b/config/config.proto deleted file mode 100644 index c0d534449..000000000 --- a/config/config.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package io.prometheus; - -// A label/value pair suitable for attaching to timeseries. -message LabelPair { - // The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*". - optional string name = 1; - // The value of the label. May contain any characters. - optional string value = 2; -} - -// A set of label/value pairs. -message LabelPairs { - repeated LabelPair label = 1; -} - -// The global Prometheus configuration section. -message GlobalConfig { - // How frequently to scrape targets by default. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - optional string scrape_interval = 1 [default = "1m"]; - // How frequently to evaluate rules by default. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - optional string evaluation_interval = 2 [default = "1m"]; - // The labels to add to any timeseries that this Prometheus instance scrapes. - optional LabelPairs labels = 3; - // The list of file names of rule files to load. - repeated string rule_file = 4; -} - -// A labeled group of targets to scrape for a job. -message TargetGroup { - // The list of endpoints to scrape via HTTP. - repeated string target = 1; - // The labels to add to any timeseries scraped for this target group. - optional LabelPairs labels = 2; -} - -// The configuration for DNS based service discovery. -message DNSConfig { - // The list of DNS-SD service names pointing to SRV records - // containing endpoint information. - repeated string name = 1; - // Discovery refresh period when using DNS-SD to discover targets. Must be a - // valid Prometheus duration string in the form "[0-9]+[smhdwy]". - optional string refresh_interval = 2 [default = "30s"]; -} - -// The configuration for relabeling of target label sets. -message RelabelConfig { - // A list of labels from which values are taken and concatenated - // with the configured separator in order. - repeated string source_label = 1; - // Regex against which the concatenation is matched. - required string regex = 2; - // The label to which the resulting string is written in a replacement. - optional string target_label = 3; - // Replacement is the regex replacement pattern to be used. - optional string replacement = 4; - // Separator is the string between concatenated values from the source labels. - optional string separator = 5 [default = ";"]; - - // Action is the action to be performed for the relabeling. - enum Action { - REPLACE = 0; // Performs a regex replacement. - KEEP = 1; // Drops targets for which the input does not match the regex. - DROP = 2; // Drops targets for which the input does match the regex. - } - optional Action action = 6 [default = REPLACE]; -} - -// The configuration for a Prometheus job to scrape. -// -// The next field no. is 11. -message ScrapeConfig { - // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". - required string job_name = 1; - // How frequently to scrape targets from this job. Overrides the global - // default. Must be a valid Prometheus duration string in the form - // "[0-9]+[smhdwy]". 
- optional string scrape_interval = 2; - // Per-target timeout when scraping this job. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - optional string scrape_timeout = 7 [default = "10s"]; - // List of DNS service discovery configurations. - repeated DNSConfig dns_config = 9; - // List of labeled target groups for this job. - repeated TargetGroup target_group = 5; - // List of relabel configurations. - repeated RelabelConfig relabel_config = 10; - // The HTTP resource path on which to fetch metrics from targets. - optional string metrics_path = 6 [default = "/metrics"]; - // The URL scheme with which to fetch metrics from targets. - optional string scheme = 8 [default = "http"]; -} - -// The top-level Prometheus configuration. -message PrometheusConfig { - // Global Prometheus configuration options. If omitted, an empty global - // configuration with default values (see GlobalConfig definition) will be - // created. - optional GlobalConfig global = 1; - // The list of scrape configs. - repeated ScrapeConfig scrape_config = 3; -} diff --git a/config/config_test.go b/config/config_test.go index 80dfb3028..f4c1fa721 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,80 +1,152 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package config import ( - "path" + "reflect" + "regexp" "strings" "testing" + "time" + + "gopkg.in/yaml.v2" + + clientmodel "github.com/prometheus/client_golang/model" ) -var fixturesPath = "fixtures" +var expectedConf = &Config{DefaultedConfig{ + GlobalConfig: &GlobalConfig{DefaultedGlobalConfig{ + ScrapeInterval: Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + EvaluationInterval: Duration(30 * time.Second), -var configTests = []struct { - inputFile string - shouldFail bool - errContains string + Labels: clientmodel.LabelSet{ + "monitor": "codelab", + "foo": "bar", + }, + }}, + + RuleFiles: []string{ + "first.rules", + "second.rules", + }, + + ScrapeConfigs: []*ScrapeConfig{ + {DefaultedScrapeConfig{ + JobName: "prometheus", + + ScrapeInterval: Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + TargetGroups: []*TargetGroup{ + { + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "localhost:9090"}, + {clientmodel.AddressLabel: "localhost:9191"}, + }, + Labels: clientmodel.LabelSet{ + "my": "label", + "your": "label", + }, + }, + }, + + RelabelConfigs: []*RelabelConfig{ + {DefaultedRelabelConfig{ + SourceLabels: clientmodel.LabelNames{"job", "__meta_dns_srv_name"}, + TargetLabel: "job", + Separator: ";", + Regex: &Regexp{*regexp.MustCompile("(.*)some-[regex]$")}, + Replacement: "foo-${1}", + Action: RelabelReplace, + }}, + }, + }}, + {DefaultedScrapeConfig{ + JobName: "service-x", + + ScrapeInterval: Duration(50 * time.Second), + ScrapeTimeout: Duration(5 * time.Second), + + MetricsPath: "/my_path", + Scheme: "http", + + DNSConfigs: []*DNSConfig{ + {DefaultedDNSConfig{ + Names: []string{ + "first.dns.address.domain.com", + "second.dns.address.domain.com", + }, + RefreshInterval: Duration(15 * time.Second), + }}, + {DefaultedDNSConfig{ + Names: []string{ + "first.dns.address.domain.com", + }, + RefreshInterval: Duration(30 * time.Second), + }}, + }, + + RelabelConfigs: []*RelabelConfig{ + {DefaultedRelabelConfig{ + SourceLabels: clientmodel.LabelNames{"job"}, + Regex: &Regexp{*regexp.MustCompile("(.*)some-[regex]$")}, + Separator: ";", + Action: RelabelDrop, + }}, + }, + }}, + }, +}, ""} + +func TestLoadConfig(t *testing.T) { + c, err := LoadFromFile("testdata/conf.good.yml") + if err != nil { + t.Errorf("Error parsing %s: %s", "testdata/conf.good.yml", err) + } + bgot, err := yaml.Marshal(c) + if err != nil { + t.Errorf("%s", err) + } + bexp, err := yaml.Marshal(expectedConf) + if err != nil { + t.Errorf("%s", err) + } + expectedConf.original = c.original + + if !reflect.DeepEqual(c, expectedConf) { + t.Errorf("%s: unexpected config result: \n\n%s\n expected\n\n%s", "testdata/conf.good.yml", bgot, bexp) + } +} + +var expectedErrors = []struct { + filename string + errMsg string }{ { - inputFile: "minimal.conf.input", + filename: "jobname.bad.yml", + errMsg: `"prom^etheus" is not a valid job name`, }, { - inputFile: "sample.conf.input", + filename: "jobname_dup.bad.yml", + errMsg: `found multiple scrape configs with job name "prometheus"`, }, { - inputFile: "empty.conf.input", + filename: "labelname.bad.yml", + errMsg: `"not$allowed" is not a valid label name`, }, { - inputFile: "sd_targets.conf.input", - }, { - inputFile: "full.conf.input", - }, { - inputFile: "invalid_proto_format.conf.input", - shouldFail: true, - errContains: "unknown field name", - }, - { - inputFile: "invalid_scrape_interval.conf.input", - shouldFail: 
true, - errContains: "invalid global scrape interval", - }, - { - inputFile: "invalid_job_name.conf.input", - shouldFail: true, - errContains: "invalid job name", - }, - { - inputFile: "invalid_label_name.conf.input", - shouldFail: true, - errContains: "invalid label name", - }, - { - inputFile: "repeated_job_name.conf.input", - shouldFail: true, - errContains: "found multiple scrape configs configured with the same job name: \"testjob1\"", + filename: "regex.bad.yml", + errMsg: "error parsing regexp", }, } -func TestConfigs(t *testing.T) { - for i, configTest := range configTests { - _, err := LoadFromFile(path.Join(fixturesPath, configTest.inputFile)) - - if err != nil { - if !configTest.shouldFail { - t.Fatalf("%d. Error parsing config %v: %v", i, configTest.inputFile, err) - } else { - if !strings.Contains(err.Error(), configTest.errContains) { - t.Fatalf("%d. Expected error containing '%v', got: %v", i, configTest.errContains, err) - } - } +func TestBadConfigs(t *testing.T) { + for _, ee := range expectedErrors { + _, err := LoadFromFile("testdata/" + ee.filename) + if err == nil { + t.Errorf("Expected error parsing %s but got none", ee.filename) + } + if !strings.Contains(err.Error(), ee.errMsg) { + t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) } } } diff --git a/config/fixtures/empty.conf.input b/config/fixtures/empty.conf.input deleted file mode 100644 index e69de29bb..000000000 diff --git a/config/fixtures/full.conf.input b/config/fixtures/full.conf.input deleted file mode 100644 index 5e5b34092..000000000 --- a/config/fixtures/full.conf.input +++ /dev/null @@ -1,89 +0,0 @@ -global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < - label: < - name: "monitor" - value: "test" - > - label: < - name: "more" - value: "test" - > - > - rule_file: "prometheus.rules" - rule_file: "prometheus.more.rules" -> - -scrape_config: < - job_name: "prometheus" - scrape_interval: "15s" - metrics_path: "/metrics" - scheme: "http" - - target_group: < - target: "localhost:9090" - > -> - -scrape_config: < - job_name: "myjob" - scrape_interval: "15s" - metrics_path: "/metrics" - scheme: "http" - - dns_config: < - name: "first.srv.name" - name: "second.srv.name" - refresh_interval: "1h" - > - - dns_config: < - name: "first2.srv.name" - name: "second2.srv.name" - refresh_interval: "1m" - > - - relabel_config: < - source_label: "l1" - source_label: "l2" - regex: "^foobar.*$" - target_label: "l3" - replacement: "static" - > - relabel_config: < - source_label: "l4" - regex: "^foobar.*$" - action: DROP - > - relabel_config: < - source_label: "l4" - regex: "^foobar.*$" - action: KEEP - > - - target_group: < - target: "localhost:9090" - target: "localhost:9091" - labels: < - label: < - name: "tg1" - value: "tg1" - > - > - > - target_group: < - target: "my.domain:9090" - target: "my.domain:9091" - labels: < - label: < - name: "tg2" - value: "tg2" - > - label: < - name: "tg2_1" - value: "tg2_1" - > - > - > -> diff --git a/config/fixtures/invalid_job_name.conf.input b/config/fixtures/invalid_job_name.conf.input deleted file mode 100644 index dcebbccb4..000000000 --- a/config/fixtures/invalid_job_name.conf.input +++ /dev/null @@ -1,3 +0,0 @@ -scrape_config: < - job_name: "1testjob" -> diff --git a/config/fixtures/invalid_label_name.conf.input b/config/fixtures/invalid_label_name.conf.input deleted file mode 100644 index f85538649..000000000 --- a/config/fixtures/invalid_label_name.conf.input +++ /dev/null @@ -1,10 +0,0 @@ -global < - scrape_interval: "30s" 
- evaluation_interval: "30s" - labels: < - label: < - name: "monitor-test" - value: "test" - > - > -> diff --git a/config/fixtures/invalid_proto_format.conf.input b/config/fixtures/invalid_proto_format.conf.input deleted file mode 100644 index ba311005b..000000000 --- a/config/fixtures/invalid_proto_format.conf.input +++ /dev/null @@ -1,11 +0,0 @@ -global < - scrape_interval: "30s" - evaluation_interval: "30s" - unknown_field: "foo" - labels: < - label: < - name: "monitor" - value: "test" - > - > -> diff --git a/config/fixtures/invalid_scrape_interval.conf.input b/config/fixtures/invalid_scrape_interval.conf.input deleted file mode 100644 index 537d50996..000000000 --- a/config/fixtures/invalid_scrape_interval.conf.input +++ /dev/null @@ -1,10 +0,0 @@ -global < - scrape_interval: "30" - evaluation_interval: "30s" - labels: < - label: < - name: "monitor" - value: "test" - > - > -> diff --git a/config/fixtures/minimal.conf.input b/config/fixtures/minimal.conf.input deleted file mode 100644 index 9a436411f..000000000 --- a/config/fixtures/minimal.conf.input +++ /dev/null @@ -1,22 +0,0 @@ -global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < - label: < - name: "monitor" - value: "test" - > - > - rule_file: "prometheus.rules" -> - -scrape_config: < - job_name: "prometheus" - scrape_interval: "15s" - metrics_path: "/metrics" - scheme: "http" - - target_group: < - target: "localhost:9090" - > -> diff --git a/config/fixtures/repeated_job_name.conf.input b/config/fixtures/repeated_job_name.conf.input deleted file mode 100644 index 3ca4fa468..000000000 --- a/config/fixtures/repeated_job_name.conf.input +++ /dev/null @@ -1,11 +0,0 @@ -scrape_config: < - job_name: "testjob1" -> - -scrape_config: < - job_name: "testjob2" -> - -scrape_config: < - job_name: "testjob1" -> diff --git a/config/fixtures/sample.conf.input b/config/fixtures/sample.conf.input deleted file mode 100644 index 6bd42873f..000000000 --- a/config/fixtures/sample.conf.input +++ /dev/null @@ -1,57 +0,0 @@ -global < - scrape_interval: "30s" - evaluation_interval: "30s" - labels: < - label: < - name: "monitor" - value: "test" - > - > - rule_file: "prometheus.rules" -> - -scrape_config: < - job_name: "prometheus" - scrape_interval: "15s" - - target_group: < - target: "localhost:9090" - labels: < - label: < - name: "group" - value: "canary" - > - > - > -> - -scrape_config: < - job_name: "random" - scrape_interval: "30s" - - target_group: < - target: "random.com:8080" - target: "random.com:8081" - target: "random.com:8082" - target: "random.com:8083" - target: "random.com:8084" - - labels: < - label: < - name: "group" - value: "production" - > - > - > - target_group: < - target: "random.com:8085" - target: "random.com:8086" - - labels: < - label: < - name: "group" - value: "canary" - > - > - > -> diff --git a/config/fixtures/sd_targets.conf.input b/config/fixtures/sd_targets.conf.input deleted file mode 100644 index 2f9acd68e..000000000 --- a/config/fixtures/sd_targets.conf.input +++ /dev/null @@ -1,8 +0,0 @@ -scrape_config: < - job_name: "testjob" - dns_config: < - name: "sd_name" - name: "sd_name2" - refresh_interval: "15s" - > -> diff --git a/config/generated/config.pb.go b/config/generated/config.pb.go deleted file mode 100644 index 6898a9af8..000000000 --- a/config/generated/config.pb.go +++ /dev/null @@ -1,406 +0,0 @@ -// Code generated by protoc-gen-go. -// source: config.proto -// DO NOT EDIT! - -/* -Package io_prometheus is a generated protocol buffer package. 
- -It is generated from these files: - config.proto - -It has these top-level messages: - LabelPair - LabelPairs - GlobalConfig - TargetGroup - DNSConfig - RelabelConfig - ScrapeConfig - PrometheusConfig -*/ -package io_prometheus - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -// Action is the action to be performed for the relabeling. -type RelabelConfig_Action int32 - -const ( - RelabelConfig_REPLACE RelabelConfig_Action = 0 - RelabelConfig_KEEP RelabelConfig_Action = 1 - RelabelConfig_DROP RelabelConfig_Action = 2 -) - -var RelabelConfig_Action_name = map[int32]string{ - 0: "REPLACE", - 1: "KEEP", - 2: "DROP", -} -var RelabelConfig_Action_value = map[string]int32{ - "REPLACE": 0, - "KEEP": 1, - "DROP": 2, -} - -func (x RelabelConfig_Action) Enum() *RelabelConfig_Action { - p := new(RelabelConfig_Action) - *p = x - return p -} -func (x RelabelConfig_Action) String() string { - return proto.EnumName(RelabelConfig_Action_name, int32(x)) -} -func (x *RelabelConfig_Action) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RelabelConfig_Action_value, data, "RelabelConfig_Action") - if err != nil { - return err - } - *x = RelabelConfig_Action(value) - return nil -} - -// A label/value pair suitable for attaching to timeseries. -type LabelPair struct { - // The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*". - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The value of the label. May contain any characters. - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -// A set of label/value pairs. -type LabelPairs struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPairs) Reset() { *m = LabelPairs{} } -func (m *LabelPairs) String() string { return proto.CompactTextString(m) } -func (*LabelPairs) ProtoMessage() {} - -func (m *LabelPairs) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -// The global Prometheus configuration section. -type GlobalConfig struct { - // How frequently to scrape targets by default. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - ScrapeInterval *string `protobuf:"bytes,1,opt,name=scrape_interval,def=1m" json:"scrape_interval,omitempty"` - // How frequently to evaluate rules by default. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - EvaluationInterval *string `protobuf:"bytes,2,opt,name=evaluation_interval,def=1m" json:"evaluation_interval,omitempty"` - // The labels to add to any timeseries that this Prometheus instance scrapes. - Labels *LabelPairs `protobuf:"bytes,3,opt,name=labels" json:"labels,omitempty"` - // The list of file names of rule files to load. 
- RuleFile []string `protobuf:"bytes,4,rep,name=rule_file" json:"rule_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GlobalConfig) Reset() { *m = GlobalConfig{} } -func (m *GlobalConfig) String() string { return proto.CompactTextString(m) } -func (*GlobalConfig) ProtoMessage() {} - -const Default_GlobalConfig_ScrapeInterval string = "1m" -const Default_GlobalConfig_EvaluationInterval string = "1m" - -func (m *GlobalConfig) GetScrapeInterval() string { - if m != nil && m.ScrapeInterval != nil { - return *m.ScrapeInterval - } - return Default_GlobalConfig_ScrapeInterval -} - -func (m *GlobalConfig) GetEvaluationInterval() string { - if m != nil && m.EvaluationInterval != nil { - return *m.EvaluationInterval - } - return Default_GlobalConfig_EvaluationInterval -} - -func (m *GlobalConfig) GetLabels() *LabelPairs { - if m != nil { - return m.Labels - } - return nil -} - -func (m *GlobalConfig) GetRuleFile() []string { - if m != nil { - return m.RuleFile - } - return nil -} - -// A labeled group of targets to scrape for a job. -type TargetGroup struct { - // The list of endpoints to scrape via HTTP. - Target []string `protobuf:"bytes,1,rep,name=target" json:"target,omitempty"` - // The labels to add to any timeseries scraped for this target group. - Labels *LabelPairs `protobuf:"bytes,2,opt,name=labels" json:"labels,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TargetGroup) Reset() { *m = TargetGroup{} } -func (m *TargetGroup) String() string { return proto.CompactTextString(m) } -func (*TargetGroup) ProtoMessage() {} - -func (m *TargetGroup) GetTarget() []string { - if m != nil { - return m.Target - } - return nil -} - -func (m *TargetGroup) GetLabels() *LabelPairs { - if m != nil { - return m.Labels - } - return nil -} - -// The configuration for DNS based service discovery. -type DNSConfig struct { - // The list of DNS-SD service names pointing to SRV records - // containing endpoint information. - Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` - // Discovery refresh period when using DNS-SD to discover targets. Must be a - // valid Prometheus duration string in the form "[0-9]+[smhdwy]". - RefreshInterval *string `protobuf:"bytes,2,opt,name=refresh_interval,def=30s" json:"refresh_interval,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DNSConfig) Reset() { *m = DNSConfig{} } -func (m *DNSConfig) String() string { return proto.CompactTextString(m) } -func (*DNSConfig) ProtoMessage() {} - -const Default_DNSConfig_RefreshInterval string = "30s" - -func (m *DNSConfig) GetName() []string { - if m != nil { - return m.Name - } - return nil -} - -func (m *DNSConfig) GetRefreshInterval() string { - if m != nil && m.RefreshInterval != nil { - return *m.RefreshInterval - } - return Default_DNSConfig_RefreshInterval -} - -// The configuration for relabeling of target label sets. -type RelabelConfig struct { - // A list of labels from which values are taken and concatenated - // with the configured separator in order. - SourceLabel []string `protobuf:"bytes,1,rep,name=source_label" json:"source_label,omitempty"` - // Regex against which the concatenation is matched. - Regex *string `protobuf:"bytes,2,req,name=regex" json:"regex,omitempty"` - // The label to which the resulting string is written in a replacement. - TargetLabel *string `protobuf:"bytes,3,opt,name=target_label" json:"target_label,omitempty"` - // Replacement is the regex replacement pattern to be used. 
- Replacement *string `protobuf:"bytes,4,opt,name=replacement" json:"replacement,omitempty"` - // Separator is the string between concatenated values from the source labels. - Separator *string `protobuf:"bytes,5,opt,name=separator,def=;" json:"separator,omitempty"` - Action *RelabelConfig_Action `protobuf:"varint,6,opt,name=action,enum=io.prometheus.RelabelConfig_Action,def=0" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RelabelConfig) Reset() { *m = RelabelConfig{} } -func (m *RelabelConfig) String() string { return proto.CompactTextString(m) } -func (*RelabelConfig) ProtoMessage() {} - -const Default_RelabelConfig_Separator string = ";" -const Default_RelabelConfig_Action RelabelConfig_Action = RelabelConfig_REPLACE - -func (m *RelabelConfig) GetSourceLabel() []string { - if m != nil { - return m.SourceLabel - } - return nil -} - -func (m *RelabelConfig) GetRegex() string { - if m != nil && m.Regex != nil { - return *m.Regex - } - return "" -} - -func (m *RelabelConfig) GetTargetLabel() string { - if m != nil && m.TargetLabel != nil { - return *m.TargetLabel - } - return "" -} - -func (m *RelabelConfig) GetReplacement() string { - if m != nil && m.Replacement != nil { - return *m.Replacement - } - return "" -} - -func (m *RelabelConfig) GetSeparator() string { - if m != nil && m.Separator != nil { - return *m.Separator - } - return Default_RelabelConfig_Separator -} - -func (m *RelabelConfig) GetAction() RelabelConfig_Action { - if m != nil && m.Action != nil { - return *m.Action - } - return Default_RelabelConfig_Action -} - -// The configuration for a Prometheus job to scrape. -// -// The next field no. is 11. -type ScrapeConfig struct { - // The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*". - JobName *string `protobuf:"bytes,1,req,name=job_name" json:"job_name,omitempty"` - // How frequently to scrape targets from this job. Overrides the global - // default. Must be a valid Prometheus duration string in the form - // "[0-9]+[smhdwy]". - ScrapeInterval *string `protobuf:"bytes,2,opt,name=scrape_interval" json:"scrape_interval,omitempty"` - // Per-target timeout when scraping this job. Must be a valid Prometheus - // duration string in the form "[0-9]+[smhdwy]". - ScrapeTimeout *string `protobuf:"bytes,7,opt,name=scrape_timeout,def=10s" json:"scrape_timeout,omitempty"` - // List of DNS service discovery configurations. - DnsConfig []*DNSConfig `protobuf:"bytes,9,rep,name=dns_config" json:"dns_config,omitempty"` - // List of labeled target groups for this job. - TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"` - // List of relabel configurations. - RelabelConfig []*RelabelConfig `protobuf:"bytes,10,rep,name=relabel_config" json:"relabel_config,omitempty"` - // The HTTP resource path on which to fetch metrics from targets. - MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"` - // The URL scheme with which to fetch metrics from targets. 
- Scheme *string `protobuf:"bytes,8,opt,name=scheme,def=http" json:"scheme,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ScrapeConfig) Reset() { *m = ScrapeConfig{} } -func (m *ScrapeConfig) String() string { return proto.CompactTextString(m) } -func (*ScrapeConfig) ProtoMessage() {} - -const Default_ScrapeConfig_ScrapeTimeout string = "10s" -const Default_ScrapeConfig_MetricsPath string = "/metrics" -const Default_ScrapeConfig_Scheme string = "http" - -func (m *ScrapeConfig) GetJobName() string { - if m != nil && m.JobName != nil { - return *m.JobName - } - return "" -} - -func (m *ScrapeConfig) GetScrapeInterval() string { - if m != nil && m.ScrapeInterval != nil { - return *m.ScrapeInterval - } - return "" -} - -func (m *ScrapeConfig) GetScrapeTimeout() string { - if m != nil && m.ScrapeTimeout != nil { - return *m.ScrapeTimeout - } - return Default_ScrapeConfig_ScrapeTimeout -} - -func (m *ScrapeConfig) GetDnsConfig() []*DNSConfig { - if m != nil { - return m.DnsConfig - } - return nil -} - -func (m *ScrapeConfig) GetTargetGroup() []*TargetGroup { - if m != nil { - return m.TargetGroup - } - return nil -} - -func (m *ScrapeConfig) GetRelabelConfig() []*RelabelConfig { - if m != nil { - return m.RelabelConfig - } - return nil -} - -func (m *ScrapeConfig) GetMetricsPath() string { - if m != nil && m.MetricsPath != nil { - return *m.MetricsPath - } - return Default_ScrapeConfig_MetricsPath -} - -func (m *ScrapeConfig) GetScheme() string { - if m != nil && m.Scheme != nil { - return *m.Scheme - } - return Default_ScrapeConfig_Scheme -} - -// The top-level Prometheus configuration. -type PrometheusConfig struct { - // Global Prometheus configuration options. If omitted, an empty global - // configuration with default values (see GlobalConfig definition) will be - // created. - Global *GlobalConfig `protobuf:"bytes,1,opt,name=global" json:"global,omitempty"` - // The list of scrape configs. - ScrapeConfig []*ScrapeConfig `protobuf:"bytes,3,rep,name=scrape_config" json:"scrape_config,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PrometheusConfig) Reset() { *m = PrometheusConfig{} } -func (m *PrometheusConfig) String() string { return proto.CompactTextString(m) } -func (*PrometheusConfig) ProtoMessage() {} - -func (m *PrometheusConfig) GetGlobal() *GlobalConfig { - if m != nil { - return m.Global - } - return nil -} - -func (m *PrometheusConfig) GetScrapeConfig() []*ScrapeConfig { - if m != nil { - return m.ScrapeConfig - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.RelabelConfig_Action", RelabelConfig_Action_name, RelabelConfig_Action_value) -} diff --git a/config/load.go b/config/load.go deleted file mode 100644 index 3ee7f8695..000000000 --- a/config/load.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "io/ioutil" - - "github.com/golang/protobuf/proto" - - pb "github.com/prometheus/prometheus/config/generated" -) - -// LoadFromString returns a config parsed from the provided string. -func LoadFromString(configStr string) (*Config, error) { - configProto := pb.PrometheusConfig{} - if err := proto.UnmarshalText(configStr, &configProto); err != nil { - return nil, err - } - if configProto.Global == nil { - configProto.Global = &pb.GlobalConfig{} - } - for _, scfg := range configProto.GetScrapeConfig() { - if scfg.ScrapeInterval == nil { - scfg.ScrapeInterval = proto.String(configProto.Global.GetScrapeInterval()) - } - } - - config := &Config{configProto} - err := config.Validate() - - return config, err -} - -// LoadFromFile returns a config parsed from the file of the provided name. -func LoadFromFile(fileName string) (*Config, error) { - configStr, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - - return LoadFromString(string(configStr)) -} diff --git a/config/load_test.go b/config/load_test.go deleted file mode 100644 index 5e1aa423f..000000000 --- a/config/load_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "testing" -) - -func TestLoadFromFile(t *testing.T) { - _, err := LoadFromFile("file-does-not-exist.conf") - if err == nil { - t.Error("Error expected on non-existing config file path but got none") - } -} diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml new file mode 100644 index 000000000..290d2b5cc --- /dev/null +++ b/config/testdata/conf.good.yml @@ -0,0 +1,61 @@ +# my global config +global_config: + scrape_interval: 15s + evaluation_interval: 30s + # scrape_timeout is set to the global default (10s). + + labels: + monitor: codelab + foo: bar + +rule_files: + - "first.rules" + - "second.rules" + +scrape_configs: + - job_name: prometheus + + # scrape_interval is defined by the configured global (15s). + # scrape_timeout is defined by the global default (10s). + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + labels: + foo: baz + + target_groups: + - targets: ['localhost:9090', 'localhost:9191'] + labels: + my: label + your: label + + relabel_configs: + - source_labels: [job, __meta_dns_srv_name] + regex: (.*)some-[regex]$ + target_label: job + replacement: foo-${1} + # action defaults to 'replace' + + + - job_name: service-x + + scrape_interval: 50s + scrape_timeout: 5s + + metrics_path: /my_path + # scheme defaults to 'http'. + + dns_configs: + - refresh_interval: 15s + names: + - first.dns.address.domain.com + - second.dns.address.domain.com + - names: + - first.dns.address.domain.com + # refresh_interval defaults to 30s. 
+ + relabel_configs: + - source_labels: [job] + regex: (.*)some-[regex]$ + action: drop diff --git a/config/testdata/jobname.bad.yml b/config/testdata/jobname.bad.yml new file mode 100644 index 000000000..2d08e1f82 --- /dev/null +++ b/config/testdata/jobname.bad.yml @@ -0,0 +1,2 @@ +scrape_configs: + - job_name: prom^etheus diff --git a/config/testdata/jobname_dup.bad.yml b/config/testdata/jobname_dup.bad.yml new file mode 100644 index 000000000..0265493c3 --- /dev/null +++ b/config/testdata/jobname_dup.bad.yml @@ -0,0 +1,5 @@ +# Two scrape configs with the same job names are not allowed. +scrape_configs: + - job_name: prometheus + - job_name: service-x + - job_name: prometheus diff --git a/config/testdata/labelname.bad.yml b/config/testdata/labelname.bad.yml new file mode 100644 index 000000000..e940d58e7 --- /dev/null +++ b/config/testdata/labelname.bad.yml @@ -0,0 +1,3 @@ +global_config: + labels: + not$allowed: value diff --git a/config/testdata/regex.bad.yml b/config/testdata/regex.bad.yml new file mode 100644 index 000000000..4cfe792fa --- /dev/null +++ b/config/testdata/regex.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - regex: abc(def diff --git a/main.go b/main.go index 83cceb2ec..eba792e32 100644 --- a/main.go +++ b/main.go @@ -161,7 +161,7 @@ func NewPrometheus() *prometheus { ruleManager := manager.NewRuleManager(&manager.RuleManagerOptions{ SampleAppender: sampleAppender, NotificationHandler: notificationHandler, - EvaluationInterval: conf.EvaluationInterval(), + EvaluationInterval: time.Duration(conf.GlobalConfig.EvaluationInterval), Storage: memStorage, PrometheusURL: web.MustBuildServerURL(*pathPrefix), PathPrefix: *pathPrefix, diff --git a/retrieval/relabel.go b/retrieval/relabel.go index e0ddb232d..337a1b67a 100644 --- a/retrieval/relabel.go +++ b/retrieval/relabel.go @@ -1,13 +1,12 @@ package retrieval import ( - "regexp" + "fmt" "strings" clientmodel "github.com/prometheus/client_golang/model" "github.com/prometheus/prometheus/config" - pb "github.com/prometheus/prometheus/config/generated" ) // Relabel returns a relabeled copy of the given label set. The relabel configurations @@ -31,40 +30,34 @@ func Relabel(labels clientmodel.LabelSet, cfgs ...*config.RelabelConfig) (client } func relabel(labels clientmodel.LabelSet, cfg *config.RelabelConfig) (clientmodel.LabelSet, error) { - pat, err := regexp.Compile(cfg.GetRegex()) - if err != nil { - return nil, err + values := make([]string, 0, len(cfg.SourceLabels)) + for _, ln := range cfg.SourceLabels { + values = append(values, string(labels[ln])) } + val := strings.Join(values, cfg.Separator) - values := make([]string, 0, len(cfg.GetSourceLabel())) - for _, name := range cfg.GetSourceLabel() { - values = append(values, string(labels[clientmodel.LabelName(name)])) - } - val := strings.Join(values, cfg.GetSeparator()) - - switch cfg.GetAction() { - case pb.RelabelConfig_DROP: - if pat.MatchString(val) { + switch cfg.Action { + case config.RelabelDrop: + if cfg.Regex.MatchString(val) { return nil, nil } - case pb.RelabelConfig_KEEP: - if !pat.MatchString(val) { + case config.RelabelKeep: + if !cfg.Regex.MatchString(val) { return nil, nil } - case pb.RelabelConfig_REPLACE: + case config.RelabelReplace: // If there is no match no replacement must take place. 
- if !pat.MatchString(val) { + if !cfg.Regex.MatchString(val) { break } - res := pat.ReplaceAllString(val, cfg.GetReplacement()) - ln := clientmodel.LabelName(cfg.GetTargetLabel()) + res := cfg.Regex.ReplaceAllString(val, cfg.Replacement) if res == "" { - delete(labels, ln) + delete(labels, cfg.TargetLabel) } else { - labels[ln] = clientmodel.LabelValue(res) + labels[cfg.TargetLabel] = clientmodel.LabelValue(res) } default: - panic("retrieval.relabel: unknown relabel action type") + panic(fmt.Errorf("retrieval.relabel: unknown relabel action type %q", cfg.Action)) } return labels, nil } diff --git a/retrieval/relabel_test.go b/retrieval/relabel_test.go index 0bdfe4315..70398a54f 100644 --- a/retrieval/relabel_test.go +++ b/retrieval/relabel_test.go @@ -2,20 +2,18 @@ package retrieval import ( "reflect" + "regexp" "testing" - "github.com/golang/protobuf/proto" - clientmodel "github.com/prometheus/client_golang/model" "github.com/prometheus/prometheus/config" - pb "github.com/prometheus/prometheus/config/generated" ) func TestRelabel(t *testing.T) { tests := []struct { input clientmodel.LabelSet - relabel []pb.RelabelConfig + relabel []config.DefaultedRelabelConfig output clientmodel.LabelSet }{ { @@ -24,14 +22,14 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("f(.*)"), - TargetLabel: proto.String("d"), - Separator: proto.String(";"), - Replacement: proto.String("ch${1}-ch${1}"), - Action: pb.RelabelConfig_REPLACE.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("f(.*)")}, + TargetLabel: clientmodel.LabelName("d"), + Separator: ";", + Replacement: "ch${1}-ch${1}", + Action: config.RelabelReplace, }, }, output: clientmodel.LabelSet{ @@ -47,21 +45,22 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a", "b"}, - Regex: proto.String("^f(.*);(.*)r$"), - TargetLabel: proto.String("a"), - Separator: proto.String(";"), - Replacement: proto.String("b${1}${2}m"), // boobam + SourceLabels: clientmodel.LabelNames{"a", "b"}, + Regex: &config.Regexp{*regexp.MustCompile("^f(.*);(.*)r$")}, + TargetLabel: clientmodel.LabelName("a"), + Separator: ";", + Replacement: "b${1}${2}m", // boobam + Action: config.RelabelReplace, }, { - SourceLabel: []string{"c", "a"}, - Regex: proto.String("(b).*b(.*)ba(.*)"), - TargetLabel: proto.String("d"), - Separator: proto.String(";"), - Replacement: proto.String("$1$2$2$3"), - Action: pb.RelabelConfig_REPLACE.Enum(), + SourceLabels: clientmodel.LabelNames{"c", "a"}, + Regex: &config.Regexp{*regexp.MustCompile("(b).*b(.*)ba(.*)")}, + TargetLabel: clientmodel.LabelName("d"), + Separator: ";", + Replacement: "$1$2$2$3", + Action: config.RelabelReplace, }, }, output: clientmodel.LabelSet{ @@ -75,18 +74,18 @@ func TestRelabel(t *testing.T) { input: clientmodel.LabelSet{ "a": "foo", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("o$"), - Action: pb.RelabelConfig_DROP.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("o$")}, + Action: config.RelabelDrop, }, { - SourceLabel: []string{"a"}, - Regex: proto.String("f(.*)"), - TargetLabel: proto.String("d"), - Separator: proto.String(";"), - Replacement: proto.String("ch$1-ch$1"), - Action: 
pb.RelabelConfig_REPLACE.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("f(.*)")}, + TargetLabel: clientmodel.LabelName("d"), + Separator: ";", + Replacement: "ch$1-ch$1", + Action: config.RelabelReplace, }, }, output: nil, @@ -95,11 +94,11 @@ func TestRelabel(t *testing.T) { input: clientmodel.LabelSet{ "a": "foo", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("no-match"), - Action: pb.RelabelConfig_DROP.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("no-match")}, + Action: config.RelabelDrop, }, }, output: clientmodel.LabelSet{ @@ -110,11 +109,11 @@ func TestRelabel(t *testing.T) { input: clientmodel.LabelSet{ "a": "foo", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("no-match"), - Action: pb.RelabelConfig_KEEP.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("no-match")}, + Action: config.RelabelKeep, }, }, output: nil, @@ -123,11 +122,11 @@ func TestRelabel(t *testing.T) { input: clientmodel.LabelSet{ "a": "foo", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("^f"), - Action: pb.RelabelConfig_KEEP.Enum(), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("^f")}, + Action: config.RelabelKeep, }, }, output: clientmodel.LabelSet{ @@ -139,13 +138,13 @@ func TestRelabel(t *testing.T) { input: clientmodel.LabelSet{ "a": "boo", }, - relabel: []pb.RelabelConfig{ + relabel: []config.DefaultedRelabelConfig{ { - SourceLabel: []string{"a"}, - Regex: proto.String("^f"), - Action: pb.RelabelConfig_REPLACE.Enum(), - TargetLabel: proto.String("b"), - Replacement: proto.String("bar"), + SourceLabels: clientmodel.LabelNames{"a"}, + Regex: &config.Regexp{*regexp.MustCompile("^f")}, + TargetLabel: clientmodel.LabelName("b"), + Replacement: "bar", + Action: config.RelabelReplace, }, }, output: clientmodel.LabelSet{ @@ -157,7 +156,6 @@ func TestRelabel(t *testing.T) { for i, test := range tests { var relabel []*config.RelabelConfig for _, rl := range test.relabel { - proto.SetDefaults(&rl) relabel = append(relabel, &config.RelabelConfig{rl}) } res, err := Relabel(test.input, relabel...) diff --git a/retrieval/target.go b/retrieval/target.go index cdd3acdca..bd1db8dc6 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -190,12 +190,12 @@ func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSe t.Lock() defer t.Unlock() - t.url.Scheme = cfg.GetScheme() + t.url.Scheme = cfg.Scheme t.url.Path = string(baseLabels[clientmodel.MetricsPathLabel]) - t.scrapeInterval = cfg.ScrapeInterval() - t.deadline = cfg.ScrapeTimeout() - t.httpClient = utility.NewDeadlineClient(cfg.ScrapeTimeout()) + t.scrapeInterval = time.Duration(cfg.ScrapeInterval) + t.deadline = time.Duration(cfg.ScrapeTimeout) + t.httpClient = utility.NewDeadlineClient(time.Duration(cfg.ScrapeTimeout)) t.baseLabels = clientmodel.LabelSet{} // All remaining internal labels will not be part of the label set. 
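
For illustration only (not part of this patch): the Update hunk above shows the
pattern used throughout this series, where typed YAML fields replace the old
protobuf getters and intervals convert with a plain type conversion. A minimal
sketch, assuming the config and time imports and mirroring the test fixtures
in this series:

    // Illustrative sketch: build a typed scrape config by hand, as the
    // tests do, and consume its Duration fields directly.
    scfg := &config.ScrapeConfig{config.DefaultedScrapeConfig{
        JobName:        "example",
        ScrapeInterval: config.Duration(15 * time.Second),
        ScrapeTimeout:  config.Duration(10 * time.Second),
    }}
    // One type conversion replaces the old getter-plus-parse step
    // (cfg.ScrapeInterval(), cfg.ScrapeTimeout()).
    interval := time.Duration(scfg.ScrapeInterval)
    deadline := time.Duration(scfg.ScrapeTimeout)
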
diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 20b0fcee3..062d27568 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -26,8 +26,6 @@ import ( clientmodel "github.com/prometheus/client_golang/model" - "github.com/golang/protobuf/proto" - "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/utility" ) @@ -91,9 +89,6 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { } func TestTargetRecordScrapeHealth(t *testing.T) { - scfg := &config.ScrapeConfig{} - proto.SetDefaults(&scfg.ScrapeConfig) - testTarget := newTestTarget("example.url", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}) now := clientmodel.Now() @@ -150,9 +145,6 @@ func TestTargetScrapeTimeout(t *testing.T) { ) defer server.Close() - scfg := &config.ScrapeConfig{} - proto.SetDefaults(&scfg.ScrapeConfig) - var testTarget Target = newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) appender := nopAppender{} diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 6e9f29f49..4cf253c3e 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -17,13 +17,13 @@ import ( "fmt" "strings" "sync" + "time" "github.com/golang/glog" clientmodel "github.com/prometheus/client_golang/model" "github.com/prometheus/prometheus/config" - pb "github.com/prometheus/prometheus/config/generated" "github.com/prometheus/prometheus/retrieval/discovery" "github.com/prometheus/prometheus/storage" ) @@ -124,7 +124,7 @@ func (tm *TargetManager) handleTargetUpdates(cfg *config.ScrapeConfig, ch <-chan // Thus, oscilliating label sets for targets with the same source, // but providers from different configs, are prevented. func fullSource(cfg *config.ScrapeConfig, src string) string { - return cfg.GetJobName() + ":" + src + return cfg.JobName + ":" + src } // Stop all background processing. @@ -289,7 +289,7 @@ func (tm *TargetManager) applyConfig(cfg *config.Config) error { // Only apply changes if everything was successful. providers := map[*config.ScrapeConfig][]TargetProvider{} - for _, scfg := range cfg.ScrapeConfigs() { + for _, scfg := range cfg.ScrapeConfigs { provs, err := ProvidersFromConfig(scfg) if err != nil { return err @@ -299,7 +299,7 @@ func (tm *TargetManager) applyConfig(cfg *config.Config) error { tm.m.Lock() defer tm.m.Unlock() - tm.globalLabels = cfg.GlobalLabels() + tm.globalLabels = cfg.GlobalConfig.Labels tm.providers = providers return nil } @@ -315,7 +315,10 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc // set already. Apply the labelsets in order of decreasing precedence. labelsets := []clientmodel.LabelSet{ tg.Labels, - cfg.Labels(), + clientmodel.LabelSet{ + clientmodel.MetricsPathLabel: clientmodel.LabelValue(cfg.MetricsPath), + clientmodel.JobLabel: clientmodel.LabelValue(cfg.JobName), + }, tm.globalLabels, } for _, lset := range labelsets { @@ -330,7 +333,7 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc return nil, fmt.Errorf("instance %d in target group %s has no address", i, tg) } - labels, err := Relabel(labels, cfg.RelabelConfigs()...) + labels, err := Relabel(labels, cfg.RelabelConfigs...) 
if err != nil { return nil, fmt.Errorf("error while relabelling instance %d in target group %s: %s", i, tg, err) } @@ -357,13 +360,12 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc func ProvidersFromConfig(cfg *config.ScrapeConfig) ([]TargetProvider, error) { var providers []TargetProvider - for _, dnscfg := range cfg.DNSConfigs() { - dnsSD := discovery.NewDNSDiscovery(dnscfg.GetName(), dnscfg.RefreshInterval()) + for _, dnscfg := range cfg.DNSConfigs { + dnsSD := discovery.NewDNSDiscovery(dnscfg.Names, time.Duration(dnscfg.RefreshInterval)) providers = append(providers, dnsSD) } - if tgs := cfg.GetTargetGroup(); tgs != nil { - static := NewStaticProvider(tgs) - providers = append(providers, static) + if len(cfg.TargetGroups) > 0 { + providers = append(providers, NewStaticProvider(cfg.TargetGroups)) } return providers, nil } @@ -375,25 +377,13 @@ type StaticProvider struct { // NewStaticProvider returns a StaticProvider configured with the given // target groups. -func NewStaticProvider(groups []*pb.TargetGroup) *StaticProvider { - prov := &StaticProvider{} - +func NewStaticProvider(groups []*config.TargetGroup) *StaticProvider { for i, tg := range groups { - g := &config.TargetGroup{ - Source: fmt.Sprintf("static:%d", i), - Labels: clientmodel.LabelSet{}, - } - for _, pair := range tg.GetLabels().GetLabel() { - g.Labels[clientmodel.LabelName(pair.GetName())] = clientmodel.LabelValue(pair.GetValue()) - } - for _, t := range tg.GetTarget() { - g.Targets = append(g.Targets, clientmodel.LabelSet{ - clientmodel.AddressLabel: clientmodel.LabelValue(t), - }) - } - prov.TargetGroups = append(prov.TargetGroups, g) + tg.Source = fmt.Sprintf("static:%d", i) + } + return &StaticProvider{ + TargetGroups: groups, } - return prov } // Run implements the TargetProvider interface. 
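
For illustration only (not part of this patch): the reworked constructor now
receives typed target groups directly and stamps each with a "static:<index>"
source, so a caller might look like this sketch (types as defined in the
config package above):

    groups := []*config.TargetGroup{{
        Targets: []clientmodel.LabelSet{
            {clientmodel.AddressLabel: "localhost:9090"},
        },
        Labels: clientmodel.LabelSet{"group": "canary"},
    }}
    prov := NewStaticProvider(groups)
    // prov.TargetGroups[0].Source is now "static:0".
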
diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 7aa090314..6becfb221 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -15,25 +15,26 @@ package retrieval import ( "reflect" + "regexp" "testing" "time" - "github.com/golang/protobuf/proto" - clientmodel "github.com/prometheus/client_golang/model" "github.com/prometheus/prometheus/config" - pb "github.com/prometheus/prometheus/config/generated" ) func TestTargetManagerChan(t *testing.T) { - testJob1 := &config.ScrapeConfig{pb.ScrapeConfig{ - JobName: proto.String("test_job1"), - ScrapeInterval: proto.String("1m"), - TargetGroup: []*pb.TargetGroup{ - {Target: []string{"example.org:80", "example.com:80"}}, + testJob1 := &config.ScrapeConfig{config.DefaultedScrapeConfig{ + JobName: "test_job1", + ScrapeInterval: config.Duration(1 * time.Minute), + TargetGroups: []*config.TargetGroup{{ + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "example.org:80"}, + {clientmodel.AddressLabel: "example.com:80"}, + }, }}, - } + }} prov1 := &fakeTargetProvider{ sources: []string{"src1", "src2"}, update: make(chan *config.TargetGroup), @@ -153,63 +154,76 @@ func TestTargetManagerChan(t *testing.T) { } func TestTargetManagerConfigUpdate(t *testing.T) { - testJob1 := &pb.ScrapeConfig{ - JobName: proto.String("test_job1"), - ScrapeInterval: proto.String("1m"), - TargetGroup: []*pb.TargetGroup{ - {Target: []string{"example.org:80", "example.com:80"}}, - }, - } - testJob2 := &pb.ScrapeConfig{ - JobName: proto.String("test_job2"), - ScrapeInterval: proto.String("1m"), - TargetGroup: []*pb.TargetGroup{ - { - Target: []string{"example.org:8080", "example.com:8081"}, - Labels: &pb.LabelPairs{Label: []*pb.LabelPair{ - {Name: proto.String("foo"), Value: proto.String("bar")}, - {Name: proto.String("boom"), Value: proto.String("box")}, - }}, + testJob1 := &config.ScrapeConfig{config.DefaultedScrapeConfig{ + JobName: "test_job1", + ScrapeInterval: config.Duration(1 * time.Minute), + TargetGroups: []*config.TargetGroup{{ + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "example.org:80"}, + {clientmodel.AddressLabel: "example.com:80"}, }, - {Target: []string{"test.com:1234"}}, + }}, + }} + testJob2 := &config.ScrapeConfig{config.DefaultedScrapeConfig{ + JobName: "test_job2", + ScrapeInterval: config.Duration(1 * time.Minute), + TargetGroups: []*config.TargetGroup{ { - Target: []string{"test.com:1235"}, - Labels: &pb.LabelPairs{Label: []*pb.LabelPair{ - {Name: proto.String("instance"), Value: proto.String("fixed")}, - }}, + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "example.org:8080"}, + {clientmodel.AddressLabel: "example.com:8081"}, + }, + Labels: clientmodel.LabelSet{ + "foo": "bar", + "boom": "box", + }, + }, + { + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "test.com:1234"}, + }, + }, + { + Targets: []clientmodel.LabelSet{ + {clientmodel.AddressLabel: "test.com:1235"}, + }, + Labels: clientmodel.LabelSet{"instance": "fixed"}, }, }, - RelabelConfig: []*pb.RelabelConfig{ - { - SourceLabel: []string{string(clientmodel.AddressLabel)}, - Regex: proto.String(`^test\.(.*?):(.*)`), - Replacement: proto.String("foo.${1}:${2}"), - TargetLabel: proto.String(string(clientmodel.AddressLabel)), - }, { + RelabelConfigs: []*config.RelabelConfig{ + {config.DefaultedRelabelConfig{ + SourceLabels: clientmodel.LabelNames{clientmodel.AddressLabel}, + Regex: &config.Regexp{*regexp.MustCompile(`^test\.(.*?):(.*)`)}, + Replacement: "foo.${1}:${2}", + 
TargetLabel: clientmodel.AddressLabel, + Action: config.RelabelReplace, + }}, + {config.DefaultedRelabelConfig{ // Add a new label for example.* targets. - SourceLabel: []string{string(clientmodel.AddressLabel), "boom", "foo"}, - Regex: proto.String("^example.*?-b([a-z-]+)r$"), - TargetLabel: proto.String("new"), - Replacement: proto.String("$1"), - Separator: proto.String("-"), - }, { + SourceLabels: clientmodel.LabelNames{clientmodel.AddressLabel, "boom", "foo"}, + Regex: &config.Regexp{*regexp.MustCompile("^example.*?-b([a-z-]+)r$")}, + TargetLabel: "new", + Replacement: "$1", + Separator: "-", + Action: config.RelabelReplace, + }}, + {config.DefaultedRelabelConfig{ // Drop an existing label. - SourceLabel: []string{"boom"}, - Regex: proto.String(".*"), - TargetLabel: proto.String("boom"), - Replacement: proto.String(""), - }, + SourceLabels: clientmodel.LabelNames{"boom"}, + Regex: &config.Regexp{*regexp.MustCompile(".*")}, + TargetLabel: "boom", + Replacement: "", + Action: config.RelabelReplace, + }}, }, - } - proto.SetDefaults(testJob1) - proto.SetDefaults(testJob2) + }} sequence := []struct { - scrapeConfigs []*pb.ScrapeConfig + scrapeConfigs []*config.ScrapeConfig expected map[string][]clientmodel.LabelSet }{ { - scrapeConfigs: []*pb.ScrapeConfig{testJob1}, + scrapeConfigs: []*config.ScrapeConfig{testJob1}, expected: map[string][]clientmodel.LabelSet{ "test_job1:static:0": { {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, @@ -217,7 +231,7 @@ func TestTargetManagerConfigUpdate(t *testing.T) { }, }, }, { - scrapeConfigs: []*pb.ScrapeConfig{testJob1}, + scrapeConfigs: []*config.ScrapeConfig{testJob1}, expected: map[string][]clientmodel.LabelSet{ "test_job1:static:0": { {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, @@ -225,7 +239,7 @@ func TestTargetManagerConfigUpdate(t *testing.T) { }, }, }, { - scrapeConfigs: []*pb.ScrapeConfig{testJob1, testJob2}, + scrapeConfigs: []*config.ScrapeConfig{testJob1, testJob2}, expected: map[string][]clientmodel.LabelSet{ "test_job1:static:0": { {clientmodel.JobLabel: "test_job1", clientmodel.InstanceLabel: "example.org:80"}, @@ -243,10 +257,10 @@ func TestTargetManagerConfigUpdate(t *testing.T) { }, }, }, { - scrapeConfigs: []*pb.ScrapeConfig{}, + scrapeConfigs: []*config.ScrapeConfig{}, expected: map[string][]clientmodel.LabelSet{}, }, { - scrapeConfigs: []*pb.ScrapeConfig{testJob2}, + scrapeConfigs: []*config.ScrapeConfig{testJob2}, expected: map[string][]clientmodel.LabelSet{ "test_job2:static:0": { {clientmodel.JobLabel: "test_job2", clientmodel.InstanceLabel: "example.org:8080", "foo": "bar", "new": "ox-ba"}, @@ -261,8 +275,9 @@ func TestTargetManagerConfigUpdate(t *testing.T) { }, }, } + conf := &config.Config{DefaultedConfig: config.DefaultConfig} - targetManager, err := NewTargetManager(&config.Config{}, nopAppender{}) + targetManager, err := NewTargetManager(conf, nopAppender{}) if err != nil { t.Fatal(err) } @@ -270,10 +285,8 @@ func TestTargetManagerConfigUpdate(t *testing.T) { defer targetManager.Stop() for i, step := range sequence { - cfg := pb.PrometheusConfig{ - ScrapeConfig: step.scrapeConfigs, - } - err := targetManager.ApplyConfig(&config.Config{cfg}) + conf.ScrapeConfigs = step.scrapeConfigs + err := targetManager.ApplyConfig(conf) if err != nil { t.Fatal(err) } diff --git a/rules/manager/manager.go b/rules/manager/manager.go index 1e7e070f4..bd50d45d6 100644 --- a/rules/manager/manager.go +++ b/rules/manager/manager.go @@ -268,7 +268,7 @@ func (m *ruleManager) 
runIteration() { } func (m *ruleManager) AddRulesFromConfig(config *config.Config) error { - for _, ruleFile := range config.Global.RuleFile { + for _, ruleFile := range config.RuleFiles { newRules, err := rules.LoadRulesFromFile(ruleFile) if err != nil { return fmt.Errorf("%s: %s", ruleFile, err) From 86087120ddc8b75a2fbbf54549cd41fc682af68c Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Thu, 7 May 2015 16:47:18 +0200 Subject: [PATCH 09/27] Replace example config with new YAML format. --- config/config.go | 69 ++++++++++++++++---------- config/config_test.go | 6 +-- config/testdata/conf.good.yml | 4 +- config/testdata/labelname.bad.yml | 2 +- documentation/examples/prometheus.conf | 30 ----------- documentation/examples/prometheus.yml | 30 +++++++++++ retrieval/targetmanager.go | 2 +- 7 files changed, 79 insertions(+), 64 deletions(-) delete mode 100644 documentation/examples/prometheus.conf create mode 100644 documentation/examples/prometheus.yml diff --git a/config/config.go b/config/config.go index 1c501b66f..87fe01595 100644 --- a/config/config.go +++ b/config/config.go @@ -66,7 +66,7 @@ var ( } // The default DNS SD configuration. - DefaultDNSConfig = DefaultedDNSConfig{ + DefaultDNSSDConfig = DefaultedDNSSDConfig{ RefreshInterval: Duration(30 * time.Second), } ) @@ -80,7 +80,7 @@ type Config struct { original string } -func (c *Config) String() string { +func (c Config) String() string { if c.original != "" { return c.original } @@ -117,12 +117,12 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { // DefaultedConfig is a proxy type for Config. type DefaultedConfig struct { - GlobalConfig *GlobalConfig `yaml:"global_config"` + GlobalConfig *GlobalConfig `yaml:"global"` RuleFiles []string `yaml:"rule_files,omitempty"` ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` } -// GlobalConfig configures values that used across other configuration +// GlobalConfig configures values that are used across other configuration // objects. type GlobalConfig struct { // DefaultedGlobalConfig contains the actual fields for GlobalConfig. @@ -141,11 +141,11 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // DefaultedGlobalConfig is a proxy type for GlobalConfig. type DefaultedGlobalConfig struct { // How frequently to scrape targets by default. - ScrapeInterval Duration `yaml:"scrape_interval"` + ScrapeInterval Duration `yaml:"scrape_interval,omitempty"` // The default timeout when scraping targets. - ScrapeTimeout Duration `yaml:"scrape_timeout"` + ScrapeTimeout Duration `yaml:"scrape_timeout,omitempty"` // How frequently to evaluate rules by default. - EvaluationInterval Duration `yaml:"evaluation_interval"` + EvaluationInterval Duration `yaml:"evaluation_interval,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. Labels clientmodel.LabelSet `yaml:"labels,omitempty"` @@ -175,18 +175,18 @@ type DefaultedScrapeConfig struct { // The job name to which the job label is set by default. JobName string `yaml:"job_name"` // How frequently to scrape the targets of this scrape config. - ScrapeInterval Duration `yaml:"scrape_interval"` + ScrapeInterval Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. - ScrapeTimeout Duration `yaml:"scrape_timeout"` + ScrapeTimeout Duration `yaml:"scrape_timeout,omitempty"` // The HTTP resource path on which to fetch metrics from targets. 
- MetricsPath string `yaml:"metrics_path"` + MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. - Scheme string `yaml:"scheme"` + Scheme string `yaml:"scheme,omitempty"` // List of labeled target groups for this job. TargetGroups []*TargetGroup `yaml:"target_groups,omitempty"` // List of DNS service discovery configurations. - DNSConfigs []*DNSConfig `yaml:"dns_configs,omitempty"` + DNSSDConfigs []*DNSSDConfig `yaml:"dns_sd_configs,omitempty"` // List of relabel configurations. RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` } @@ -203,7 +203,7 @@ type TargetGroup struct { Source string `yaml:"-", json:"-"` } -func (tg *TargetGroup) String() string { +func (tg TargetGroup) String() string { return tg.Source } @@ -229,29 +229,44 @@ func (tg *TargetGroup) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -// DNSConfig is the configuration for DNS based service discovery. -type DNSConfig struct { - // DefaultedDNSConfig contains the actual fields for DNSConfig. - DefaultedDNSConfig `yaml:",inline"` +// MarshalYAML implements the yaml.Marshaller interface. +func (tg TargetGroup) MarshalYAML() (interface{}, error) { + g := &struct { + Targets []string `yaml:"targets"` + Labels clientmodel.LabelSet `yaml:"labels,omitempty"` + }{ + Targets: make([]string, 0, len(tg.Targets)), + Labels: tg.Labels, + } + for _, t := range tg.Targets { + g.Targets = append(g.Targets, string(t[clientmodel.AddressLabel])) + } + return g, nil +} + +// DNSSDConfig is the configuration for DNS based service discovery. +type DNSSDConfig struct { + // DefaultedDNSSDConfig contains the actual fields for DNSSDConfig. + DefaultedDNSSDConfig `yaml:",inline"` } // UnmarshalYAML implements the yaml.Unmarshaller interface. -func (c *DNSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - c.DefaultedDNSConfig = DefaultDNSConfig - err := unmarshal(&c.DefaultedDNSConfig) +func (c *DNSSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + c.DefaultedDNSSDConfig = DefaultDNSSDConfig + err := unmarshal(&c.DefaultedDNSSDConfig) if err != nil { return err } if len(c.Names) == 0 { - return fmt.Errorf("DNS config must contain at least one SRV server name") + return fmt.Errorf("DNS config must contain at least one SRV record name") } return nil } -// DefaultedDNSConfig is a proxy type for DNSConfig. -type DefaultedDNSConfig struct { +// DefaultedDNSSDConfig is a proxy type for DNSSDConfig. +type DefaultedDNSSDConfig struct { Names []string `yaml:"names"` - RefreshInterval Duration `yaml:"refresh_interval"` + RefreshInterval Duration `yaml:"refresh_interval,omitempty"` } // RelabelAction is the action to be performed on relabeling. @@ -298,7 +313,7 @@ type DefaultedRelabelConfig struct { // with the configured separator in order. SourceLabels clientmodel.LabelNames `yaml:"source_labels,flow"` // Separator is the string between concatenated values from the source labels. - Separator string `yaml:"separator"` + Separator string `yaml:"separator,omitempty"` // Regex against which the concatenation is matched. Regex *Regexp `yaml:"regex"` // The label to which the resulting string is written in a replacement. @@ -306,7 +321,7 @@ type DefaultedRelabelConfig struct { // Replacement is the regex replacement pattern to be used. Replacement string `yaml:"replacement,omitempty"` // Action is the action to be performed for the relabeling. 
-	Action RelabelAction `yaml:"action"`
+	Action RelabelAction `yaml:"action,omitempty"`
 }
 
 // Regexp encapsulates a regexp.Regexp and makes it YAML marshallable.
@@ -329,7 +344,7 @@ func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 
 // MarshalYAML implements the yaml.Marshaller interface.
-func (re *Regexp) MarshalYAML() (interface{}, error) {
+func (re Regexp) MarshalYAML() (interface{}, error) {
 	return re.String(), nil
 }
 
diff --git a/config/config_test.go b/config/config_test.go
index f4c1fa721..792ad83c1 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -72,15 +72,15 @@ var expectedConf = &Config{DefaultedConfig{
 			MetricsPath: "/my_path",
 			Scheme:      "http",
 
-			DNSConfigs: []*DNSConfig{
-				{DefaultedDNSConfig{
+			DNSSDConfigs: []*DNSSDConfig{
+				{DefaultedDNSSDConfig{
 					Names: []string{
 						"first.dns.address.domain.com",
 						"second.dns.address.domain.com",
 					},
 					RefreshInterval: Duration(15 * time.Second),
 				}},
-				{DefaultedDNSConfig{
+				{DefaultedDNSSDConfig{
 					Names: []string{
 						"first.dns.address.domain.com",
 					},
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index 290d2b5cc..a1ff4c192 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -1,5 +1,5 @@
 # my global config
-global_config:
+global:
   scrape_interval: 15s
   evaluation_interval: 30s
   # scrape_timeout is set to the global default (10s).
@@ -46,7 +46,7 @@ scrape_configs:
   metrics_path: /my_path
   # scheme defaults to 'http'.
 
-  dns_configs:
+  dns_sd_configs:
   - refresh_interval: 15s
     names:
     - first.dns.address.domain.com
diff --git a/config/testdata/labelname.bad.yml b/config/testdata/labelname.bad.yml
index e940d58e7..66ea324cf 100644
--- a/config/testdata/labelname.bad.yml
+++ b/config/testdata/labelname.bad.yml
@@ -1,3 +1,3 @@
-global_config:
+global:
   labels:
     not$allowed: value
diff --git a/documentation/examples/prometheus.conf b/documentation/examples/prometheus.conf
deleted file mode 100644
index 3bbaad72c..000000000
--- a/documentation/examples/prometheus.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-# Global default settings.
-global {
-  scrape_interval: "15s"     # By default, scrape targets every 15 seconds.
-  evaluation_interval: "15s" # By default, evaluate rules every 15 seconds.
-
-  # Attach these extra labels to all timeseries collected by this Prometheus instance.
-  labels: {
-    label: {
-      name: "monitor"
-      value: "codelab-monitor"
-    }
-  }
-
-  # Load and evaluate rules in this file every 'evaluation_interval' seconds. This field may be repeated.
-  #rule_file: "prometheus.rules"
-}
-
-# A job definition containing exactly one endpoint to scrape: Here it's prometheus itself.
-job: {
-  # The job name is added as a label `job={job-name}` to any timeseries scraped from this job.
-  name: "prometheus"
-  # Override the global default and scrape targets from this job every 5 seconds.
-  scrape_interval: "5s"
-
-  # Let's define a group of targets to scrape for this job. In this case, only one.
-  target_group: {
-    # These endpoints are scraped via HTTP.
-    target: "http://localhost:9090/metrics"
-  }
-}
diff --git a/documentation/examples/prometheus.yml b/documentation/examples/prometheus.yml
new file mode 100644
index 000000000..075645395
--- /dev/null
+++ b/documentation/examples/prometheus.yml
@@ -0,0 +1,30 @@
+# my global config
+global:
+  scrape_interval:     15s # By default, scrape targets every 15 seconds.
+  evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
+  # scrape_timeout is set to the global default (10s).
+ + # Attach these extra labels to all timeseries collected by this Prometheus instance. + labels: + monitor: codelab-monitor + +# Load and evaluate rules in this file every 'evaluation_interval' seconds. +rule_files: + # - "first.rules" + # - "second.rules" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: prometheus + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + scrape_timeout: 10s + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + target_groups: + - targets: ['localhost:9090'] \ No newline at end of file diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 4cf253c3e..beebe7bd6 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -360,7 +360,7 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc func ProvidersFromConfig(cfg *config.ScrapeConfig) ([]TargetProvider, error) { var providers []TargetProvider - for _, dnscfg := range cfg.DNSConfigs { + for _, dnscfg := range cfg.DNSSDConfigs { dnsSD := discovery.NewDNSDiscovery(dnscfg.Names, time.Duration(dnscfg.RefreshInterval)) providers = append(providers, dnsSD) } From bb540fd9fdd6cffcddf811cf66510a690419b22d Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Tue, 12 May 2015 16:52:56 +0200 Subject: [PATCH 10/27] Implement config reloading on SIGHUP. With this commit, sending SIGHUP to the Prometheus process will reload and apply the configuration file. The different components attempt to handle failing changes gracefully. --- main.go | 53 ++++++++++++++++----------- retrieval/target.go | 1 + retrieval/targetmanager.go | 65 ++++++++++++--------------------- retrieval/targetmanager_test.go | 12 ++---- rules/manager.go | 35 ++++++++++++++++-- web/status.go | 10 +++++ 6 files changed, 103 insertions(+), 73 deletions(-) diff --git a/main.go b/main.go index 1325c09ab..47d94b46c 100644 --- a/main.go +++ b/main.go @@ -92,13 +92,6 @@ type prometheus struct { // NewPrometheus creates a new prometheus object based on flag values. // Call Serve() to start serving and Close() for clean shutdown. 
func NewPrometheus() *prometheus { - conf, err := config.LoadFromFile(*configFile) - if err != nil { - glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err) - glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.") - os.Exit(2) - } - notificationHandler := notification.NewNotificationHandler(*alertmanagerURL, *notificationQueueCapacity) var syncStrategy local.SyncStrategy @@ -155,26 +148,17 @@ func NewPrometheus() *prometheus { sampleAppender = fanout } - targetManager, err := retrieval.NewTargetManager(conf, sampleAppender) - if err != nil { - glog.Errorf("Error creating target manager: %s", err) - os.Exit(1) - } + targetManager := retrieval.NewTargetManager(sampleAppender) queryEngine := promql.NewEngine(memStorage) ruleManager := rules.NewManager(&rules.ManagerOptions{ SampleAppender: sampleAppender, NotificationHandler: notificationHandler, - EvaluationInterval: time.Duration(conf.GlobalConfig.EvaluationInterval), QueryEngine: queryEngine, PrometheusURL: web.MustBuildServerURL(*pathPrefix), PathPrefix: *pathPrefix, }) - if err := ruleManager.LoadRuleFiles(conf.RuleFiles...); err != nil { - glog.Errorf("Error loading rule files: %s", err) - os.Exit(1) - } flags := map[string]string{} flag.VisitAll(func(f *flag.Flag) { @@ -182,7 +166,6 @@ func NewPrometheus() *prometheus { }) prometheusStatus := &web.PrometheusStatusHandler{ BuildInfo: BuildInfo, - Config: conf.String(), RuleManager: ruleManager, TargetPools: targetManager.Pools, Flags: flags, @@ -229,9 +212,27 @@ func NewPrometheus() *prometheus { webService: webService, } webService.QuitChan = make(chan struct{}) + + p.reloadConfig() + return p } +func (p *prometheus) reloadConfig() { + glog.Infof("Loading configuration file %s", *configFile) + + conf, err := config.LoadFromFile(*configFile) + if err != nil { + glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err) + glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.") + return + } + + p.webService.StatusHandler.ApplyConfig(conf) + p.targetManager.ApplyConfig(conf) + p.ruleManager.ApplyConfig(conf) +} + // Serve starts the Prometheus server. It returns after the server has been shut // down. The method installs an interrupt handler, allowing to trigger a // shutdown by sending SIGTERM to the process. @@ -252,15 +253,25 @@ func (p *prometheus) Serve() { } }() - notifier := make(chan os.Signal) - signal.Notify(notifier, os.Interrupt, syscall.SIGTERM) + hup := make(chan os.Signal) + signal.Notify(hup, syscall.SIGHUP) + go func() { + for range hup { + p.reloadConfig() + } + }() + + term := make(chan os.Signal) + signal.Notify(term, os.Interrupt, syscall.SIGTERM) select { - case <-notifier: + case <-term: glog.Warning("Received SIGTERM, exiting gracefully...") case <-p.webService.QuitChan: glog.Warning("Received termination request via web service, exiting gracefully...") } + close(hup) + p.targetManager.Stop() p.ruleManager.Stop() p.queryEngine.Stop() diff --git a/retrieval/target.go b/retrieval/target.go index bd1db8dc6..73ce5faf7 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -285,6 +285,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { // On changed scrape interval the new interval becomes effective // after the next scrape. 
if lastScrapeInterval != t.scrapeInterval { + ticker.Stop() ticker = time.NewTicker(t.scrapeInterval) lastScrapeInterval = t.scrapeInterval } diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index beebe7bd6..43d9d165a 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -62,16 +62,13 @@ type TargetManager struct { providers map[*config.ScrapeConfig][]TargetProvider } -// NewTargetManager creates a new TargetManager based on the given config. -func NewTargetManager(cfg *config.Config, sampleAppender storage.SampleAppender) (*TargetManager, error) { +// NewTargetManager creates a new TargetManager. +func NewTargetManager(sampleAppender storage.SampleAppender) *TargetManager { tm := &TargetManager{ sampleAppender: sampleAppender, targets: make(map[string][]Target), } - if err := tm.applyConfig(cfg); err != nil { - return nil, err - } - return tm, nil + return tm } // Run starts background processing to handle target updates. @@ -129,19 +126,17 @@ func fullSource(cfg *config.ScrapeConfig, src string) string { // Stop all background processing. func (tm *TargetManager) Stop() { - tm.stop(true) + tm.m.Lock() + defer tm.m.Unlock() + + if tm.running { + tm.stop(true) + } } // stop background processing of the target manager. If removeTargets is true, // existing targets will be stopped and removed. func (tm *TargetManager) stop(removeTargets bool) { - tm.m.Lock() - defer tm.m.Unlock() - - if !tm.running { - return - } - glog.Info("Stopping target manager...") defer glog.Info("Target manager stopped.") @@ -273,35 +268,23 @@ func (tm *TargetManager) Pools() map[string][]Target { // ApplyConfig resets the manager's target providers and job configurations as defined // by the new cfg. The state of targets that are valid in the new configuration remains unchanged. -func (tm *TargetManager) ApplyConfig(cfg *config.Config) error { - tm.stop(false) - // Even if updating the config failed, we want to continue rather than stop scraping anything. - defer tm.Run() - - if err := tm.applyConfig(cfg); err != nil { - glog.Warningf("Error updating config, changes not applied: %s", err) - return err - } - return nil -} - -func (tm *TargetManager) applyConfig(cfg *config.Config) error { - // Only apply changes if everything was successful. - providers := map[*config.ScrapeConfig][]TargetProvider{} - - for _, scfg := range cfg.ScrapeConfigs { - provs, err := ProvidersFromConfig(scfg) - if err != nil { - return err - } - providers[scfg] = provs - } +func (tm *TargetManager) ApplyConfig(cfg *config.Config) { tm.m.Lock() defer tm.m.Unlock() + if tm.running { + tm.stop(false) + // Even if updating the config failed, we want to continue rather than stop scraping anything. + defer tm.Run() + } + providers := map[*config.ScrapeConfig][]TargetProvider{} + + for _, scfg := range cfg.ScrapeConfigs { + providers[scfg] = ProvidersFromConfig(scfg) + } + tm.globalLabels = cfg.GlobalConfig.Labels tm.providers = providers - return nil } // targetsFromGroup builds targets based on the given TargetGroup and config. @@ -335,7 +318,7 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc labels, err := Relabel(labels, cfg.RelabelConfigs...) if err != nil { - return nil, fmt.Errorf("error while relabelling instance %d in target group %s: %s", i, tg, err) + return nil, fmt.Errorf("error while relabeling instance %d in target group %s: %s", i, tg, err) } // Check if the target was dropped. 
if labels == nil { @@ -357,7 +340,7 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc } // ProvidersFromConfig returns all TargetProviders configured in cfg. -func ProvidersFromConfig(cfg *config.ScrapeConfig) ([]TargetProvider, error) { +func ProvidersFromConfig(cfg *config.ScrapeConfig) []TargetProvider { var providers []TargetProvider for _, dnscfg := range cfg.DNSSDConfigs { @@ -367,7 +350,7 @@ func ProvidersFromConfig(cfg *config.ScrapeConfig) ([]TargetProvider, error) { if len(cfg.TargetGroups) > 0 { providers = append(providers, NewStaticProvider(cfg.TargetGroups)) } - return providers, nil + return providers } // StaticProvider holds a list of target groups that never change. diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 6becfb221..f6aea2a40 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -277,19 +277,15 @@ func TestTargetManagerConfigUpdate(t *testing.T) { } conf := &config.Config{DefaultedConfig: config.DefaultConfig} - targetManager, err := NewTargetManager(conf, nopAppender{}) - if err != nil { - t.Fatal(err) - } + targetManager := NewTargetManager(nopAppender{}) + targetManager.ApplyConfig(conf) + targetManager.Run() defer targetManager.Stop() for i, step := range sequence { conf.ScrapeConfigs = step.scrapeConfigs - err := targetManager.ApplyConfig(conf) - if err != nil { - t.Fatal(err) - } + targetManager.ApplyConfig(conf) <-time.After(1 * time.Millisecond) diff --git a/rules/manager.go b/rules/manager.go index fe6b12b00..917b07c76 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -24,6 +24,7 @@ import ( clientmodel "github.com/prometheus/client_golang/model" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/notification" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -120,7 +121,11 @@ func NewManager(o *ManagerOptions) *Manager { func (m *Manager) Run() { defer glog.Info("Rule manager stopped.") - ticker := time.NewTicker(m.interval) + m.Lock() + lastInterval := m.interval + m.Unlock() + + ticker := time.NewTicker(lastInterval) defer ticker.Stop() for { @@ -137,6 +142,14 @@ func (m *Manager) Run() { start := time.Now() m.runIteration() iterationDuration.Observe(float64(time.Since(start) / time.Millisecond)) + + m.Lock() + if lastInterval != m.interval { + ticker.Stop() + ticker = time.NewTicker(m.interval) + lastInterval = m.interval + } + m.Unlock() case <-m.done: return } @@ -255,11 +268,27 @@ func (m *Manager) runIteration() { wg.Wait() } -// LoadRuleFiles loads alerting and recording rules from the given files. -func (m *Manager) LoadRuleFiles(filenames ...string) error { +// ApplyConfig updates the rule manager's state as the config requires. If +// loading the new rules failed the old rule set is restored. +func (m *Manager) ApplyConfig(conf *config.Config) { m.Lock() defer m.Unlock() + m.interval = time.Duration(conf.GlobalConfig.EvaluationInterval) + + rulesSnapshot := make([]Rule, len(m.rules)) + copy(rulesSnapshot, m.rules) + m.rules = m.rules[:0] + + if err := m.loadRuleFiles(conf.RuleFiles...); err != nil { + // If loading the new rules failed, restore the old rule set. + m.rules = rulesSnapshot + glog.Errorf("Error loading rules, previous rule set restored: %s", err) + } +} + +// loadRuleFiles loads alerting and recording rules from the given files. 
+func (m *Manager) loadRuleFiles(filenames ...string) error { for _, fn := range filenames { content, err := ioutil.ReadFile(fn) if err != nil { diff --git a/web/status.go b/web/status.go index 7008c8dfe..6840505b9 100644 --- a/web/status.go +++ b/web/status.go @@ -18,6 +18,7 @@ import ( "sync" "time" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/retrieval" "github.com/prometheus/prometheus/rules" ) @@ -47,5 +48,14 @@ func (h *PrometheusStatusHandler) TargetStateToClass() map[retrieval.TargetState } func (h *PrometheusStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.mu.RLock() executeTemplate(w, "status", h, h.PathPrefix) + h.mu.RUnlock() +} + +// ApplyConfig updates the status handler's state as the new config requires. +func (h *PrometheusStatusHandler) ApplyConfig(conf *config.Config) { + h.mu.Lock() + h.Config = conf.String() + h.mu.Unlock() } From d5aa012fd01318c7319499cf40f783d2c3adff06 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Fri, 15 May 2015 12:47:50 +0200 Subject: [PATCH 11/27] Make HTTP basic auth configurable for scrape targets. --- config/config.go | 10 +++++++++- config/config_test.go | 6 +++++- config/testdata/conf.good.yml | 6 +++++- retrieval/target.go | 3 +++ 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 87fe01595..17137bb67 100644 --- a/config/config.go +++ b/config/config.go @@ -182,6 +182,8 @@ type DefaultedScrapeConfig struct { MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. Scheme string `yaml:"scheme,omitempty"` + // The HTTP basic authentication credentials for the targets. + BasicAuth *BasicAuth `yaml:"basic_auth"` // List of labeled target groups for this job. TargetGroups []*TargetGroup `yaml:"target_groups,omitempty"` @@ -191,7 +193,13 @@ type DefaultedScrapeConfig struct { RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` } -// A labeled group of targets to scrape for a job. +// BasicAuth contains basic HTTP authentication credentials. +type BasicAuth struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +// TargetGroup is a set of targets with a common label set. type TargetGroup struct { // Targets is a list of targets identified by a label set. Each target is // uniquely identifiable in the group by its address label. diff --git a/config/config_test.go b/config/config_test.go index 792ad83c1..0b7aabd43 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -69,8 +69,12 @@ var expectedConf = &Config{DefaultedConfig{ ScrapeInterval: Duration(50 * time.Second), ScrapeTimeout: Duration(5 * time.Second), + BasicAuth: &BasicAuth{ + Username: "admin", + Password: "password", + }, MetricsPath: "/my_path", - Scheme: "http", + Scheme: "https", DNSSDConfigs: []*DNSSDConfig{ {DefaultedDNSSDConfig{ diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index a1ff4c192..943901edf 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -40,11 +40,15 @@ scrape_configs: - job_name: service-x + basic_auth: + username: admin + password: password + scrape_interval: 50s scrape_timeout: 5s metrics_path: /my_path - # scheme defaults to 'http'. 
+ scheme: https dns_sd_configs: - refresh_interval: 15s diff --git a/retrieval/target.go b/retrieval/target.go index 73ce5faf7..2f40ec132 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -192,6 +192,9 @@ func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSe t.url.Scheme = cfg.Scheme t.url.Path = string(baseLabels[clientmodel.MetricsPathLabel]) + if cfg.BasicAuth != nil { + t.url.User = url.UserPassword(cfg.BasicAuth.Username, cfg.BasicAuth.Password) + } t.scrapeInterval = time.Duration(cfg.ScrapeInterval) t.deadline = time.Duration(cfg.ScrapeTimeout) From 3b21c7037af2183f61ceb8e7c3e83deaaa490491 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Wed, 13 May 2015 11:28:04 +0200 Subject: [PATCH 12/27] Add file SD to configuration. --- config/config.go | 46 ++++++++++++++++++++++++++++++++--- config/config_test.go | 11 +++++++++ config/testdata/conf.good.yml | 8 ++++++ 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 17137bb67..81ab4fb06 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,10 @@ import ( "github.com/prometheus/prometheus/utility" ) -var jobNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$") +var ( + patJobName = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_-]*$`) + patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml)$`) +) // Load parses the YAML input s into a Config. func Load(s string) (*Config, error) { @@ -69,6 +72,11 @@ var ( DefaultDNSSDConfig = DefaultedDNSSDConfig{ RefreshInterval: Duration(30 * time.Second), } + + // The default file SD configuration. + DefaultFileSDConfig = DefaultedFileSDConfig{ + RefreshInterval: Duration(30 * time.Second), + } ) // Config is the top-level configuration for Prometheus's config files. @@ -164,7 +172,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return err } - if !jobNameRE.MatchString(c.JobName) { + if !patJobName.MatchString(c.JobName) { return fmt.Errorf("%q is not a valid job name", c.JobName) } return nil @@ -189,6 +197,8 @@ type DefaultedScrapeConfig struct { TargetGroups []*TargetGroup `yaml:"target_groups,omitempty"` // List of DNS service discovery configurations. DNSSDConfigs []*DNSSDConfig `yaml:"dns_sd_configs,omitempty"` + // List of file service discovery configurations. + FileSDConfigs []*FileSDConfig `yaml:"file_sd_configs,omitempty"` // List of relabel configurations. RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` } @@ -266,7 +276,7 @@ func (c *DNSSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if len(c.Names) == 0 { - return fmt.Errorf("DNS config must contain at least one SRV record name") + return fmt.Errorf("DNS-SD config must contain at least one SRV record name") } return nil } @@ -277,6 +287,36 @@ type DefaultedDNSSDConfig struct { RefreshInterval Duration `yaml:"refresh_interval,omitempty"` } +// FileSDConfig is the configuration for file based discovery. +type FileSDConfig struct { + // DefaultedFileSDConfig contains the actual fields for FileSDConfig. + DefaultedFileSDConfig `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaller interface. 
+func (c *FileSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	c.DefaultedFileSDConfig = DefaultFileSDConfig
+	err := unmarshal(&c.DefaultedFileSDConfig)
+	if err != nil {
+		return err
+	}
+	if len(c.Names) == 0 {
+		return fmt.Errorf("file discovery config must contain at least one path name")
+	}
+	for _, name := range c.Names {
+		if !patFileSDName.MatchString(name) {
+			return fmt.Errorf("path name %q is not valid for file discovery", name)
+		}
+	}
+	return nil
+}
+
+// DefaultedFileSDConfig is a proxy type for FileSDConfig.
+type DefaultedFileSDConfig struct {
+	Names           []string `yaml:"names"`
+	RefreshInterval Duration `yaml:"refresh_interval,omitempty"`
+}
+
 // RelabelAction is the action to be performed on relabeling.
 type RelabelAction string
 
diff --git a/config/config_test.go b/config/config_test.go
index 0b7aabd43..40ea0fa7e 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -52,6 +52,17 @@ var expectedConf = &Config{DefaultedConfig{
 				},
 			},
 
+			FileSDConfigs: []*FileSDConfig{
+				{DefaultedFileSDConfig{
+					Names:           []string{"foo/*.slow.json", "foo/*.slow.yml"},
+					RefreshInterval: Duration(10 * time.Minute),
+				}},
+				{DefaultedFileSDConfig{
+					Names:           []string{"bar/*.yaml"},
+					RefreshInterval: Duration(30 * time.Second),
+				}},
+			},
+
 			RelabelConfigs: []*RelabelConfig{
 				{DefaultedRelabelConfig{
 					SourceLabels: clientmodel.LabelNames{"job", "__meta_dns_srv_name"},
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index 943901edf..cde9eeed6 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -24,6 +24,14 @@ scrape_configs:
   labels:
     foo: baz
 
+  file_sd_configs:
+  - names:
+    - foo/*.slow.json
+    - foo/*.slow.yml
+    refresh_interval: 10m
+  - names:
+    - bar/*.yaml
+
   target_groups:
   - targets: ['localhost:9090', 'localhost:9191']
     labels:
From 36016cb308f4881cfd1ea5399de0611940a1b7c2 Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Wed, 13 May 2015 11:48:39 +0200
Subject: [PATCH 13/27] Add fsnotify to godeps.
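
fsnotify provides the cross-platform file-watching primitives that the
file-based service discovery added in the previous commit can build on to
pick up changed target files without polling. For orientation, a minimal
sketch of the vendored v1 API (illustrative only; the watched path "foo"
is a placeholder):

    package main

    import (
        "log"

        "gopkg.in/fsnotify.v1"
    )

    func main() {
        watcher, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer watcher.Close()

        // Watch a directory of target files, as a file-based discovery
        // mechanism would.
        if err := watcher.Add("foo"); err != nil {
            log.Fatal(err)
        }
        for {
            select {
            case ev := <-watcher.Events:
                // Create, write, and rename events signal changed files.
                if ev.Op&(fsnotify.Create|fsnotify.Write|fsnotify.Rename) != 0 {
                    log.Println("file changed:", ev.Name)
                }
            case err := <-watcher.Errors:
                log.Println("watch error:", err)
            }
        }
    }
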
---
 Godeps/Godeps.json | 4 +
 .../src/gopkg.in/fsnotify.v1/.gitignore | 6 +
 .../src/gopkg.in/fsnotify.v1/.travis.yml | 15 +
 .../src/gopkg.in/fsnotify.v1/AUTHORS | 34 +
 .../src/gopkg.in/fsnotify.v1/CHANGELOG.md | 263 ++++
 .../src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 ++
 .../src/gopkg.in/fsnotify.v1/LICENSE | 28 +
 .../gopkg.in/fsnotify.v1/NotUsed.xcworkspace | 0
 .../src/gopkg.in/fsnotify.v1/README.md | 59 +
 .../src/gopkg.in/fsnotify.v1/circle.yml | 26 +
 .../src/gopkg.in/fsnotify.v1/example_test.go | 42 +
 .../src/gopkg.in/fsnotify.v1/fsnotify.go | 62 +
 .../src/gopkg.in/fsnotify.v1/inotify.go | 306 +++++
 .../gopkg.in/fsnotify.v1/inotify_poller.go | 186 +++
 .../fsnotify.v1/inotify_poller_test.go | 228 ++++
 .../src/gopkg.in/fsnotify.v1/inotify_test.go | 292 +++++
 .../gopkg.in/fsnotify.v1/integration_test.go | 1135 +++++++++++++++++
 .../src/gopkg.in/fsnotify.v1/kqueue.go | 463 +++++++
 .../src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 +
 .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 +
 .../src/gopkg.in/fsnotify.v1/windows.go | 561 ++++++++
 21 files changed, 3810 insertions(+)
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go
 create mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index edfccf1ec..e4cf26b4f 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -68,6 +68,10 @@
 		{
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "b6fdb7d8a4ccefede406f8fe0f017fb58265054c"
 		},
+		{
+			"ImportPath": "gopkg.in/fsnotify.v1",
+			"Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0"
+		},
 		{
 			"ImportPath": "gopkg.in/yaml.v2",
 			"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213"
diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
new file mode 100644
index 000000000..4cd0cbaf4
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git 
a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml new file mode 100644 index 000000000..67467e140 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml @@ -0,0 +1,15 @@ +sudo: false +language: go + +go: + - 1.4.1 + +before_script: + - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi + +os: + - linux + - osx + +notifications: + email: false diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS new file mode 100644 index 000000000..4e0e8284e --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS @@ -0,0 +1,34 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Adrien Bustany +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Dave Cheney +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Matt Layher +Nathan Youngman +Paul Hammond +Pieter Droogendijk +Pursuit92 +Rob Figueiredo +Soge Zhang +Tilak Sharma +Travis Cline +Tudor Golubenco +Yukang +bronze1man +debrando +henrikedwards diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md new file mode 100644 index 000000000..ea9428a2a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md @@ -0,0 +1,263 @@ +# Changelog + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. 
(thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 /
2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 + diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md new file mode 100644 index 000000000..0f377f341 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, OS X and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE new file mode 100644 index 000000000..f21e54080 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace new file mode 100644 index 000000000..e69de29bb diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md new file mode 100644 index 000000000..7a0b24736 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md @@ -0,0 +1,59 @@ +# File system notifications for Go + +[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1) + +Go 1.3+ required. + +Cross platform: Windows, Linux, BSD and OS X. + +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)| +|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information. + +## API stability + +Two major versions of fsnotify exist. + +**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1. + +```go +import "gopkg.in/fsnotify.v0" +``` + +\* Refer to the package as fsnotify (without the .v0 suffix). + +**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with: + +```go +import "gopkg.in/fsnotify.v1" +``` + +Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API. + +**Master** may have unreleased changes. 
Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible: + +```go +import "github.com/go-fsnotify/fsnotify" +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go). + + +[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml new file mode 100644 index 000000000..204217fb0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml @@ -0,0 +1,26 @@ +## OS X build (CircleCI iOS beta) + +# Pretend like it's an Xcode project, at least to get it running. +machine: + environment: + XCODE_WORKSPACE: NotUsed.xcworkspace + XCODE_SCHEME: NotUsed + # This is where the go project is actually checked out to: + CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify + +dependencies: + pre: + - brew upgrade go + +test: + override: + - go test ./... + +# Idealized future config, eventually with cross-platform build matrix :-) + +# machine: +# go: +# version: 1.4 +# os: +# - osx +# - linux diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go new file mode 100644 index 000000000..306379660 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +package fsnotify_test + +import ( + "log" + + "github.com/go-fsnotify/fsnotify" +) + +func ExampleNewWatcher() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event := <-watcher.Events: + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err := <-watcher.Errors: + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go new file mode 100644 index 000000000..c899ee008 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go @@ -0,0 +1,62 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." 
+func (e Event) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if e.Op&Create == Create { + buffer.WriteString("|CREATE") + } + if e.Op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if e.Op&Write == Write { + buffer.WriteString("|WRITE") + } + if e.Op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if e.Op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + + // If buffer remains empty, return no event names + if buffer.Len() == 0 { + return fmt.Sprintf("%q: ", e.Name) + } + + // Return a list of event names, with leading pipe character stripped + return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go new file mode 100644 index 000000000..d7759ec8c --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := syscall.InotifyInit() + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
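+// Adding a path that is already being watched updates the existing watch: the requested flags are merged into the kernel's mask via IN_MASK_ADD rather than replacing it.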
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | + syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | + syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + watchEntry, found := w.watches[name] + w.mu.Unlock() + if found { + watchEntry.flags |= flags + flags |= syscall.IN_MASK_ADD + } + wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + w.mu.Lock() + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + w.mu.Unlock() + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // That means we can safely delete it from our watches, whatever inotify_rm_watch does. + delete(w.watches, name) + success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer syscall.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = syscall.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == syscall.EINTR { + continue + } + + // syscall.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < syscall.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading.
+ err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-syscall.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name := w.paths[int(raw.Wd)] + w.mu.Unlock() + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += syscall.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel, such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&syscall.IN_IGNORED == syscall.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns a platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO { + e.Op |= Create + } + if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE { + e.Op |= Remove + } + if mask&syscall.IN_MODIFY == syscall.IN_MODIFY { + e.Op |= Write + } + if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go new file mode 100644 index 000000000..3b4178404 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go @@ -0,0 +1,186 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build linux + +package fsnotify + +import ( + "errors" + "syscall" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = syscall.EpollCreate(1) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := syscall.EpollEvent{ + Fd: int32(poller.fd), + Events: syscall.EPOLLIN, + } + errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = syscall.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: syscall.EPOLLIN, + } + errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]syscall.EpollEvent, 7) + for { + n, errno := syscall.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == syscall.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&syscall.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&syscall.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let syscall.Read pick up the error. + epollerr = true + } + if event.Events&syscall.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&syscall.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&syscall.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&syscall.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. 
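+ // Draining pipe[0] here completes the self-pipe pattern: Close() calls wake(), which writes a byte to the pipe's write end, so a wait() blocked in epoll_wait returns even when no inotify data is pending.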
+ err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := syscall.Write(poller.pipe[1], buf) + if n == -1 { + if errno == syscall.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := syscall.Read(poller.pipe[0], buf) + if n == -1 { + if errno == syscall.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + syscall.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + syscall.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + syscall.Close(poller.epfd) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go new file mode 100644 index 000000000..af9f407f8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go @@ -0,0 +1,228 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "syscall" + "testing" + "time" +) + +type testFd [2]int + +func makeTestFd(t *testing.T) testFd { + var tfd testFd + errno := syscall.Pipe(tfd[:]) + if errno != nil { + t.Fatalf("Failed to create pipe: %v", errno) + } + return tfd +} + +func (tfd testFd) fd() int { + return tfd[0] +} + +func (tfd testFd) closeWrite(t *testing.T) { + errno := syscall.Close(tfd[1]) + if errno != nil { + t.Fatalf("Failed to close write end of pipe: %v", errno) + } +} + +func (tfd testFd) put(t *testing.T) { + buf := make([]byte, 10) + _, errno := syscall.Write(tfd[1], buf) + if errno != nil { + t.Fatalf("Failed to write to pipe: %v", errno) + } +} + +func (tfd testFd) get(t *testing.T) { + buf := make([]byte, 10) + _, errno := syscall.Read(tfd[0], buf) + if errno != nil { + t.Fatalf("Failed to read from pipe: %v", errno) + } +} + +func (tfd testFd) close() { + syscall.Close(tfd[1]) + syscall.Close(tfd[0]) +} + +func makePoller(t *testing.T) (testFd, *fdPoller) { + tfd := makeTestFd(t) + poller, err := newFdPoller(tfd.fd()) + if err != nil { + t.Fatalf("Failed to create poller: %v", err) + } + return tfd, poller +} + +func TestPollerWithBadFd(t *testing.T) { + _, err := newFdPoller(-1) + if err != syscall.EBADF { + t.Fatalf("Expected EBADF, got: %v", err) + } +} + +func TestPollerWithData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + tfd.get(t) +} + +func TestPollerWithWakeup(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerWithClose(t *testing.T) 
{ + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.closeWrite(t) + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } +} + +func TestPollerWithWakeupAndData(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + tfd.put(t) + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + + // both data and wakeup + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + // data is still in the buffer, wakeup is cleared + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if !ok { + t.Fatalf("expected poller to return true") + } + + tfd.get(t) + // data is gone, only wakeup now + err = poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + ok, err = poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + if ok { + t.Fatalf("expected poller to return false") + } +} + +func TestPollerConcurrent(t *testing.T) { + tfd, poller := makePoller(t) + defer tfd.close() + defer poller.close() + + oks := make(chan bool) + live := make(chan bool) + defer close(live) + go func() { + defer close(oks) + for { + ok, err := poller.wait() + if err != nil { + t.Fatalf("poller failed: %v", err) + } + oks <- ok + if !<-live { + return + } + } + }() + + // Try a write + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.put(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) + live <- true + + // Try a wakeup + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + err := poller.wake() + if err != nil { + t.Fatalf("wake failed: %v", err) + } + if <-oks { + t.Fatalf("expected false") + } + live <- true + + // Try a close + select { + case <-time.After(50 * time.Millisecond): + case <-oks: + t.Fatalf("poller did not wait") + } + tfd.closeWrite(t) + if !<-oks { + t.Fatalf("expected true") + } + tfd.get(t) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go new file mode 100644 index 000000000..035ee8f95 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go @@ -0,0 +1,292 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "os" + "path/filepath" + "syscall" + "testing" + "time" +) + +func TestInotifyCloseRightAway(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Close immediately; it won't even reach the first syscall.Read. + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLater(t *testing.T) { + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + // Wait until readEvents has reached syscall.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. 
+ <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + w.Add(testDir) + + // Wait until readEvents has reached syscall.Read, and Close. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func TestInotifyCloseAfterRead(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher") + } + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add .") + } + + // Generate an event. + os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) + + // Wait for readEvents to read the event, then close the watcher. + <-time.After(50 * time.Millisecond) + w.Close() + + // Wait for the close to complete. + <-time.After(50 * time.Millisecond) + isWatcherReallyClosed(t, w) +} + +func isWatcherReallyClosed(t *testing.T, w *Watcher) { + select { + case err, ok := <-w.Errors: + if ok { + t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) + } + default: + t.Fatalf("w.Errors would have blocked; readEvents is still alive!") + } + + select { + case _, ok := <-w.Events: + if ok { + t.Fatalf("w.Events is not closed; readEvents is still alive after closing") + } + default: + t.Fatalf("w.Events would have blocked; readEvents is still alive!") + } +} + +func TestInotifyCloseCreate(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + h, err := os.Create(filepath.Join(testDir, "testfile")) + if err != nil { + t.Fatalf("Failed to create file in testdir: %v", err) + } + h.Close() + select { + case _ = <-w.Events: + case err := <-w.Errors: + t.Fatalf("Error from watcher: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("Took too long to wait for event") + } + + // At this point, we've received one event, so the goroutine is ready. + // It's also blocking on syscall.Read. + // Now we try to swap the file descriptor under its nose. 
+ w.Close() + w, err = NewWatcher() + defer w.Close() + if err != nil { + t.Fatalf("Failed to create second watcher: %v", err) + } + + <-time.After(50 * time.Millisecond) + err = w.Add(testDir) + if err != nil { + t.Fatalf("Error adding testDir again: %v", err) + } +} + +func TestInotifyStress(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + killchan := make(chan struct{}) + defer close(killchan) + + err = w.Add(testDir) + if err != nil { + t.Fatalf("Failed to add testDir: %v", err) + } + + proc, err := os.FindProcess(os.Getpid()) + if err != nil { + t.Fatalf("Error finding process: %v", err) + } + + go func() { + for { + select { + case <-time.After(5 * time.Millisecond): + err := proc.Signal(syscall.SIGUSR1) + if err != nil { + t.Fatalf("Signal failed: %v", err) + } + case <-killchan: + return + } + } + }() + + go func() { + for { + select { + case <-time.After(11 * time.Millisecond): + err := w.poller.wake() + if err != nil { + t.Fatalf("Wake failed: %v", err) + } + case <-killchan: + return + } + } + }() + + go func() { + for { + select { + case <-killchan: + return + default: + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + time.Sleep(time.Millisecond) + err = os.Remove(testFile) + if err != nil { + t.Fatalf("Remove failed: %v", err) + } + } + } + }() + + creates := 0 + removes := 0 + after := time.After(5 * time.Second) + for { + select { + case <-after: + if creates-removes > 1 || creates-removes < -1 { + t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) + } + if creates < 50 { + t.Fatalf("Expected at least 50 creates, got %d", creates) + } + return + case err := <-w.Errors: + t.Fatalf("Got an error from watcher: %v", err) + case evt := <-w.Events: + if evt.Name != testFile { + t.Fatalf("Got an event for an unknown file: %s", evt.Name) + } + if evt.Op == Create { + creates++ + } + if evt.Op == Remove { + removes++ + } + } + } +} + +func TestInotifyRemoveTwice(t *testing.T) { + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + testFile := filepath.Join(testDir, "testfile") + + handle, err := os.Create(testFile) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + handle.Close() + + w, err := NewWatcher() + if err != nil { + t.Fatalf("Failed to create watcher: %v", err) + } + defer w.Close() + + err = w.Add(testFile) + if err != nil { + t.Fatalf("Failed to add testFile: %v", err) + } + + err = os.Remove(testFile) + if err != nil { + t.Fatalf("Failed to remove testFile: %v", err) + } + + err = w.Remove(testFile) + if err != syscall.EINVAL { + t.Fatalf("Expected EINVAL from Remove, got: %v", err) + } + + err = w.Remove(testFile) + if err == syscall.EINVAL { + t.Fatalf("Got EINVAL again, watch was not removed") + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go new file mode 100644 index 000000000..59169c6af --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go @@ -0,0 +1,1135 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9,!solaris + +package fsnotify + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "sync/atomic" + "testing" + "time" +) + +// An atomic counter +type counter struct { + val int32 +} + +func (c *counter) increment() { + atomic.AddInt32(&c.val, 1) +} + +func (c *counter) value() int32 { + return atomic.LoadInt32(&c.val) +} + +func (c *counter) reset() { + atomic.StoreInt32(&c.val, 0) +} + +// tempMkdir makes a temporary directory +func tempMkdir(t *testing.T) string { + dir, err := ioutil.TempDir("", "fsnotify") + if err != nil { + t.Fatalf("failed to create test directory: %s", err) + } + return dir +} + +// newWatcher initializes an fsnotify Watcher instance. +func newWatcher(t *testing.T) *Watcher { + watcher, err := NewWatcher() + if err != nil { + t.Fatalf("NewWatcher() failed: %s", err) + } + return watcher +} + +// addWatch adds a watch for a directory +func addWatch(t *testing.T, watcher *Watcher, dir string) { + if err := watcher.Add(dir); err != nil { + t.Fatalf("watcher.Add(%q) failed: %s", dir, err) + } +} + +func TestFsnotifyMultipleOperations(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory that's not watched + testDirToMoveFiles := tempMkdir(t) + defer os.RemoveAll(testDirToMoveFiles) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived, renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Rename == Rename { + renameReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // Modify the file outside of the watched dir + f, err = os.Open(testFileRenamed) + if err != nil { + t.Fatalf("open test renamed file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Recreate the file that was moved + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system 
time to sync write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + rReceived := renameReceived.value() + if dReceived+rReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyMultipleCreates(t *testing.T) { + watcher := newWatcher(t) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") + + addWatch(t, watcher, testDir) + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Recreate the file + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Close() + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + // Modify + f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync 
write change before delete + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + mReceived := modifyReceived.value() + if mReceived < 3 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs at least %d)", mReceived, 3) + } + dReceived := deleteReceived.value() + if dReceived != 1 { + t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDirOnly(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + // This should NOT add any events to the fsnotify event queue + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, modifyReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + if event.Op&Write == Write { + modifyReceived.increment() + } + if event.Op&Create == Create { + createReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + time.Sleep(time.Millisecond) + f.WriteString("data") + f.Sync() + f.Close() + + time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete + + os.Remove(testFile) + os.Remove(testFileAlreadyExists) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 1 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1) + } + mReceived := modifyReceived.value() + if mReceived != 1 { + t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete
events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyDeleteWatchedDir(t *testing.T) { + watcher := newWatcher(t) + defer watcher.Close() + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + addWatch(t, watcher, testDir) + + // Add a watch for testFile + addWatch(t, watcher, testFileAlreadyExists) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var deleteReceived counter + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { + t.Logf("event received: %s", event) + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + }() + + os.RemoveAll(testDir) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + dReceived := deleteReceived.value() + if dReceived < 2 { + t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) + } +} + +func TestFsnotifySubDir(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") + testSubDir := filepath.Join(testDir, "sub") + testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived, deleteReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { + t.Logf("event received: %s", event) + if event.Op&Create == Create { + createReceived.increment() + } + if event.Op&Remove == Remove { + deleteReceived.increment() + } + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + addWatch(t, watcher, testDir) + + // Create sub-directory + if err := os.Mkdir(testSubDir, 0777); err != nil { + t.Fatalf("failed to create test sub-directory: %s", err) + } + + // Create a file + var f *os.File + f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + // Create a file (Should not see 
this! we are not watching subdir) + var fs *os.File + fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fs.Sync() + fs.Close() + + time.Sleep(200 * time.Millisecond) + + // Make sure receive deletes for both file and sub-directory + os.RemoveAll(testSubDir) + os.Remove(testFile1) + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + cReceived := createReceived.value() + if cReceived != 2 { + t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) + } + dReceived := deleteReceived.value() + if dReceived != 2 { + t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } +} + +func TestFsnotifyRename(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var renameReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Rename == Rename { + renameReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + + f.WriteString("data") + f.Sync() + f.Close() + + // Add a watch for testFile + addWatch(t, watcher, testFile) + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if renameReceived.value() == 0 { + t.Fatal("fsnotify rename events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestFsnotifyRenameToCreate(t *testing.T) { + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + addWatch(t, watcher, 
testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var createReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { + if event.Op&Create == Create { + createReceived.increment() + } + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if createReceived.value() == 0 { + t.Fatal("fsnotify create events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestFsnotifyRenameToOverwrite(t *testing.T) { + switch runtime.GOOS { + case "plan9", "windows": + t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create directory to get file + testDirFrom := tempMkdir(t) + defer os.RemoveAll(testDirFrom) + + testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") + testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") + + // Create a file + var fr *os.File + fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + fr.Sync() + fr.Close() + + addWatch(t, watcher, testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + var eventReceived counter + done := make(chan bool) + go func() { + for event := range eventstream { + // Only count relevant events + if event.Name == filepath.Clean(testFileRenamed) { + eventReceived.increment() + t.Logf("event received: %s", event) + } else { + t.Logf("unexpected event received: %s", event) + } + } + done <- true + }() + + // Create a file + // This should add at least one event to the fsnotify event queue + var f *os.File + f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + + if 
err := testRename(testFile, testFileRenamed); err != nil { + t.Fatalf("rename failed: %s", err) + } + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + if eventReceived.value() == 0 { + t.Fatal("fsnotify events have not been received after 500 ms") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() + t.Log("waiting for the event channel to become closed...") + select { + case <-done: + t.Log("event channel closed") + case <-time.After(2 * time.Second): + t.Fatal("event stream was not closed after 2 seconds") + } + + os.Remove(testFileRenamed) +} + +func TestRemovalOfWatch(t *testing.T) { + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + if err := watcher.Remove(testDir); err != nil { + t.Fatalf("Could not remove the watch: %v\n", err) + } + + go func() { + select { + case ev := <-watcher.Events: + t.Fatalf("We received event: %v\n", ev) + case <-time.After(500 * time.Millisecond): + t.Log("No event received, as expected.") + } + }() + + time.Sleep(200 * time.Millisecond) + // Modify the file outside of the watched dir + f, err := os.Open(testFileAlreadyExists) + if err != nil { + t.Fatalf("Open test file failed: %s", err) + } + f.WriteString("data") + f.Sync() + f.Close() + if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { + t.Fatalf("chmod failed: %s", err) + } + time.Sleep(400 * time.Millisecond) +} + +func TestFsnotifyAttrib(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("attributes don't work on Windows.") + } + + watcher := newWatcher(t) + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Receive errors on the error channel on a separate goroutine + go func() { + for err := range watcher.Errors { + t.Fatalf("error received: %s", err) + } + }() + + testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") + + // Receive events on the event channel on a separate goroutine + eventstream := watcher.Events + // The modifyReceived counter counts IsModify events that are not IsAttrib, + // and the attribReceived counts IsAttrib events (which are also IsModify as + // a consequence). 
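+	// The counter type comes from the test helpers; it is presumably a small
+	// atomic wrapper, since its increment/value/reset methods are shared with
+	// the event goroutine below.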
+	var modifyReceived counter
+	var attribReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+				if event.Op&Chmod == Chmod {
+					attribReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFile)
+
+	if err := os.Chmod(testFile, 0700); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	// Creating/writing a file also changes the mtime, so IsAttrib should be set to true here
+	time.Sleep(500 * time.Millisecond)
+	if modifyReceived.value() != 0 {
+		t.Fatal("received an unexpected modify event when creating a test file")
+	}
+	if attribReceived.value() == 0 {
+		t.Fatal("fsnotify attribute events have not been received after 500 ms")
+	}
+
+	// Modifying the contents of the file does not set the attrib flag (although e.g. the mtime
+	// might have been modified).
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+	if err != nil {
+		t.Fatalf("reopening test file failed: %s", err)
+	}
+
+	f.WriteString("more data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(500 * time.Millisecond)
+
+	if modifyReceived.value() != 1 {
+		t.Fatal("didn't receive a modify event after changing test file contents")
+	}
+
+	if attribReceived.value() != 0 {
+		t.Fatal("received an unexpected attrib event after changing test file contents")
+	}
+
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	// Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+	// of the file are not changed though)
+	if err := os.Chmod(testFile, 0600); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	time.Sleep(500 * time.Millisecond)
+
+	if attribReceived.value() != 1 {
+		t.Fatal("didn't receive an attribute change after 500ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(1 * time.Second):
+		t.Fatal("event stream was not closed after 1 second")
+	}
+
+	os.Remove(testFile)
+}
+
+func TestFsnotifyClose(t *testing.T) {
+	watcher := newWatcher(t)
+	watcher.Close()
+
+	var done int32
+	go func() {
+		watcher.Close()
+		atomic.StoreInt32(&done, 1)
+	}()
+
+	time.Sleep(50 * time.Millisecond)
+	if atomic.LoadInt32(&done) == 0 {
+		t.Fatal("double Close() test failed: second Close() call didn't return")
+	}
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	if err := watcher.Add(testDir); err == nil {
+		t.Fatal("expected error on Watch() after Close(), got nil")
+	}
+}
+
+func TestFsnotifyFakeSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	var 
errorsReceived counter + // Receive errors on the error channel on a separate goroutine + go func() { + for errors := range watcher.Errors { + t.Logf("Received error: %s", errors) + errorsReceived.increment() + } + }() + + // Count the CREATE events received + var createEventsReceived, otherEventsReceived counter + go func() { + for ev := range watcher.Events { + t.Logf("event received: %s", ev) + if ev.Op&Create == Create { + createEventsReceived.increment() + } else { + otherEventsReceived.increment() + } + } + }() + + addWatch(t, watcher, testDir) + + if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { + t.Fatalf("Failed to create bogus symlink: %s", err) + } + t.Logf("Created bogus symlink") + + // We expect this event to be received almost immediately, but let's wait 500 ms to be sure + time.Sleep(500 * time.Millisecond) + + // Should not be error, just no events for broken links (watching nothing) + if errorsReceived.value() > 0 { + t.Fatal("fsnotify errors have been received.") + } + if otherEventsReceived.value() > 0 { + t.Fatal("fsnotify other events received on the broken link") + } + + // Except for 1 create event (for the link itself) + if createEventsReceived.value() == 0 { + t.Fatal("fsnotify create events were not received after 500 ms") + } + if createEventsReceived.value() > 1 { + t.Fatal("fsnotify more create events received than expected") + } + + // Try closing the fsnotify instance + t.Log("calling Close()") + watcher.Close() +} + +// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. +// See https://codereview.appspot.com/103300045/ +// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race +func TestConcurrentRemovalOfWatch(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip("regression test for race only present on darwin") + } + + // Create directory to watch + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + // Create a file before watching directory + testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") + { + var f *os.File + f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + t.Fatalf("creating test file failed: %s", err) + } + f.Sync() + f.Close() + } + + watcher := newWatcher(t) + defer watcher.Close() + + addWatch(t, watcher, testDir) + + // Test that RemoveWatch can be invoked concurrently, with no data races. 
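+	// The two goroutines below deliberately race on Remove for the same path;
+	// the -race invocation quoted above is what actually surfaces a regression.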
+ removed1 := make(chan struct{}) + go func() { + defer close(removed1) + watcher.Remove(testDir) + }() + removed2 := make(chan struct{}) + go func() { + close(removed2) + watcher.Remove(testDir) + }() + <-removed1 + <-removed2 +} + +func TestClose(t *testing.T) { + // Regression test for #59 bad file descriptor from Close + testDir := tempMkdir(t) + defer os.RemoveAll(testDir) + + watcher := newWatcher(t) + if err := watcher.Add(testDir); err != nil { + t.Fatalf("Expected no error on Add, got %v", err) + } + err := watcher.Close() + if err != nil { + t.Fatalf("Expected no error on Close, got %v.", err) + } +} + +func testRename(file1, file2 string) error { + switch runtime.GOOS { + case "windows", "plan9": + return os.Rename(file1, file2) + default: + cmd := exec.Command("mv", file1, file2) + return cmd.Run() + } +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go new file mode 100644 index 000000000..265622d20 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go @@ -0,0 +1,463 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan bool // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan bool), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + w.mu.Lock() + ws := w.watches + w.mu.Unlock() + + var err error + for name := range ws { + if e := w.Remove(name); e != nil && err == nil { + err = e + } + } + + // Send "quit" message to the reader goroutine: + w.done <- true + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
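+// Watching a directory also watches its immediate children so that create
+// events can be reported, mimicking inotify (see watchDirectoryFiles).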
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	return w.addWatch(name, noteAllEvents)
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = syscall.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	syscall.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+func (w *Watcher) addWatch(name string, flags uint32) error {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return nil
+		}
+
+		// Follow symlinks
+		// Unfortunately, Linux can add bogus symlinks to the watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
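+		// Resolve the link target and watch that instead of the symlink itself.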
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return nil
+			}
+		}
+
+		watchfd, err = syscall.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		syscall.Close(watchfd)
+		return err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+		watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]syscall.Kevent_t, 10)
+
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			err := syscall.Close(w.kq)
+			if err != nil {
+				w.Errors <- err
+			}
+			close(w.Events)
+			close(w.Errors)
+			return
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != syscall.EINTR {
+			w.Errors <- err
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double-check that the directory still exists. This can happen
+				// when we do an rm -fr on a recursively watched folder: we may
+				// receive the modification event first, even though the folder
+				// has been deleted, and only later receive the delete event.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel
+				w.Events <- event
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				fileDir, _ := filepath.Split(event.Name)
+				fileDir = filepath.Clean(fileDir)
+				w.mu.Lock()
+				_, found := w.watches[fileDir]
+				w.mu.Unlock()
+				if found {
+					// Make sure the directory still exists before we watch for
+					// changes. When we do a recursive watch and perform rm -fr,
+					// the parent directory might have gone missing; ignore it and
+					// let the upcoming delete event remove the watch from the
+					// parent directory.
+					if _, err := os.Lstat(fileDir); err == nil {
+						w.sendDirectoryChangeEvents(fileDir)
+						// FIXME: should this be for events on files or just isDir?
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches all files inside a directory to mimic inotify
+// when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		if err := w.internalWatch(filePath, fileInfo); err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify, which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		w.Errors <- err
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		w.mu.Lock()
+		_, doesExist := w.fileExists[filePath]
+		w.mu.Unlock()
+		if !doesExist {
+			// Send create event
+			w.Events <- newCreateEvent(filePath)
+		}
+
+		// like watchDirectoryFiles (but without doing another ReadDir)
+		if err := w.internalWatch(filePath, fileInfo); err != nil {
+			return
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= syscall.NOTE_DELETE
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = syscall.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]syscall.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := syscall.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
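+// The returned slice aliases the events buffer and is only valid until the
+// next call to read.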
+func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) { + n, err := syscall.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) syscall.Timespec { + return syscall.NsecToTimespec(d.Nanoseconds()) +} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go new file mode 100644 index 000000000..c57ccb427 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "syscall" + +const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go new file mode 100644 index 000000000..174b2c331 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "syscall" + +// note: this constant is not defined on BSD +const openMode = syscall.O_EVTONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go new file mode 100644 index 000000000..811585227 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). 
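+// The request is handed to the I/O goroutine, which owns all watch state;
+// Add blocks until that goroutine replies.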
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sys_FS_ALL_EVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sys_FS_ONESHOT = 0x80000000
+	sys_FS_ONLYDIR = 0x1000000
+
+	// Events
+	sys_FS_ACCESS      = 0x1
+	sys_FS_ALL_EVENTS  = 0xfff
+	sys_FS_ATTRIB      = 0x4
+	sys_FS_CLOSE       = 0x18
+	sys_FS_CREATE      = 0x100
+	sys_FS_DELETE      = 0x200
+	sys_FS_DELETE_SELF = 0x400
+	sys_FS_MODIFY      = 0x2
+	sys_FS_MOVE        = 0xc0
+	sys_FS_MOVED_FROM  = 0x40
+	sys_FS_MOVED_TO    = 0x80
+	sys_FS_MOVE_SELF   = 0x800
+
+	// Special events
+	sys_FS_IGNORED    = 0x8000
+	sys_FS_Q_OVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
+		e.Op |= Remove
+	}
+	if mask&sys_FS_MODIFY == sys_FS_MODIFY {
+		e.Op |= Write
+	}
+	if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, 
os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sys_FS_ONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
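+// startRead issues the next overlapped ReadDirectoryChanges call for this
+// watch, or tears the watch down if no notify flags remain.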
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) { + if watch.mask&sys_FS_ONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
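+				// Treat the whole buffer as valid; the NextEntryOffset walk
+				// below still terminates at the final complete entry.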
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := watch.path + "\\" + name
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sys_FS_DELETE_SELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sys_FS_MODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sys_FS_MOVE_SELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sys_FS_ONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sys_FS_ONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = watch.path + "\\" + watch.rename
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
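+			// The kernel reported an entry beyond the bytes actually read;
+			// bail out instead of parsing garbage.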
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sys_FS_ACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sys_FS_MODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sys_FS_ATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sys_FS_CREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sys_FS_DELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sys_FS_MODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sys_FS_MOVED_FROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sys_FS_MOVED_TO
+	}
+	return 0
+}

From 93548a8882966f5b699d9f87029de0bb963d8a3a Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Wed, 13 May 2015 12:03:31 +0200
Subject: [PATCH 14/27] Add initial file-based service discovery.

This commit adds file-based service discovery, which reads target
groups from specified files. It detects changes based on file watches
and regular refreshes.
---
 config/config.go                              | 27 +-
 config/config_test.go                         |  2 +-
 config/testdata/conf.good.yml                 |  1 +
 retrieval/discovery/file.go                   | 249 ++++++++++++++++++
 retrieval/discovery/file_test.go              | 93 +++++++
 .../discovery/fixtures/target_groups.json     | 11 +
 .../discovery/fixtures/target_groups.yml      |  5 +
 retrieval/target.go                           | 18 --
 retrieval/target_test.go                      |  4 +-
 retrieval/targetmanager.go                    | 17 ++
 10 files changed, 404 insertions(+), 23 deletions(-)
 create mode 100644 retrieval/discovery/file.go
 create mode 100644 retrieval/discovery/file_test.go
 create mode 100644 retrieval/discovery/fixtures/target_groups.json
 create mode 100644 retrieval/discovery/fixtures/target_groups.yml

diff --git a/config/config.go b/config/config.go
index 81ab4fb06..5da03e2de 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1,6 +1,7 @@
 package config
 
 import (
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"regexp"
@@ -16,7 +17,7 @@ import (
 
 var (
 	patJobName    = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_-]*$`)
-	patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml)$`)
+	patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
 )
 
 // Load parses the YAML input s into a Config.
@@ -262,6 +263,28 @@ func (tg TargetGroup) MarshalYAML() (interface{}, error) {
 	return g, nil
 }
 
+// UnmarshalJSON implements the json.Unmarshaler interface.
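+// Target addresses are given as plain strings; any address containing a '/'
+// is rejected as an invalid hostname.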
+func (tg *TargetGroup) UnmarshalJSON(b []byte) error { + g := struct { + Targets []string `yaml:"targets"` + Labels clientmodel.LabelSet `yaml:"labels"` + }{} + if err := json.Unmarshal(b, &g); err != nil { + return err + } + tg.Targets = make([]clientmodel.LabelSet, 0, len(g.Targets)) + for _, t := range g.Targets { + if strings.Contains(t, "/") { + return fmt.Errorf("%q is not a valid hostname", t) + } + tg.Targets = append(tg.Targets, clientmodel.LabelSet{ + clientmodel.AddressLabel: clientmodel.LabelValue(t), + }) + } + tg.Labels = g.Labels + return nil +} + // DNSSDConfig is the configuration for DNS based service discovery. type DNSSDConfig struct { // DefaultedDNSSDConfig contains the actual fields for DNSSDConfig. @@ -301,7 +324,7 @@ func (c *FileSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if len(c.Names) == 0 { - return fmt.Errorf("file discovery config must contain at least on path name") + return fmt.Errorf("file service discovery config must contain at least one path name") } for _, name := range c.Names { if !patFileSDName.MatchString(name) { diff --git a/config/config_test.go b/config/config_test.go index 40ea0fa7e..ad21d3e0c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -54,7 +54,7 @@ var expectedConf = &Config{DefaultedConfig{ FileSDConfigs: []*FileSDConfig{ {DefaultedFileSDConfig{ - Names: []string{"foo/*.slow.json", "foo/*.slow.yml"}, + Names: []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"}, RefreshInterval: Duration(10 * time.Minute), }}, {DefaultedFileSDConfig{ diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index cde9eeed6..6af1db01c 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -28,6 +28,7 @@ scrape_configs: - names: - foo/*.slow.json - foo/*.slow.yml + - single/file.yml refresh_interval: 10m - names: - bar/*.yaml diff --git a/retrieval/discovery/file.go b/retrieval/discovery/file.go new file mode 100644 index 000000000..cda4cbc1a --- /dev/null +++ b/retrieval/discovery/file.go @@ -0,0 +1,249 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/golang/glog" + "gopkg.in/fsnotify.v1" + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/config" +) + +// FileDiscovery provides service discovery functionality based +// on files that contain target groups in JSON or YAML format. Refreshing +// happens using file watches and periodic refreshes. +type FileDiscovery struct { + paths []string + watcher *fsnotify.Watcher + interval time.Duration + done chan struct{} + + // lastRefresh stores which files were found during the last refresh + // and how many target groups they contained. + // This is used to detect deleted target groups. + lastRefresh map[string]int +} + +// NewFileDiscovery returns a new file discovery for the given paths. 
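+// Path patterns may contain wildcards in the filename segment only; matching
+// files are re-read on watch events and once per refresh interval.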
+func NewFileDiscovery(paths []string, interval time.Duration) *FileDiscovery {
+	fd := &FileDiscovery{
+		paths:    paths,
+		interval: interval,
+		done:     make(chan struct{}),
+	}
+	return fd
+}
+
+// Sources implements the TargetProvider interface.
+func (fd *FileDiscovery) Sources() []string {
+	var srcs []string
+	// As we allow multiple target groups per file we have no choice
+	// but to parse them all.
+	for _, p := range fd.listFiles() {
+		tgroups, err := readFile(p)
+		if err != nil {
+			glog.Errorf("Error reading file %q: %s", p, err)
+		}
+		for _, tg := range tgroups {
+			srcs = append(srcs, tg.Source)
+		}
+	}
+	return srcs
+}
+
+// listFiles returns a list of all files that match the configured patterns.
+func (fd *FileDiscovery) listFiles() []string {
+	var paths []string
+	for _, p := range fd.paths {
+		files, err := filepath.Glob(p)
+		if err != nil {
+			glog.Errorf("Error expanding glob %q: %s", p, err)
+			continue
+		}
+		paths = append(paths, files...)
+	}
+	return paths
+}
+
+// watchFiles sets watches on all full paths or directories that were configured for
+// this file discovery.
+func (fd *FileDiscovery) watchFiles() {
+	if fd.watcher == nil {
+		panic("no watcher configured")
+	}
+	for _, p := range fd.paths {
+		if idx := strings.LastIndex(p, "/"); idx > -1 {
+			p = p[:idx]
+		} else {
+			p = "./"
+		}
+		if err := fd.watcher.Add(p); err != nil {
+			glog.Errorf("Error adding file watch for %q: %s", p, err)
+		}
+	}
+}
+
+// Run implements the TargetProvider interface.
+func (fd *FileDiscovery) Run(ch chan<- *config.TargetGroup) {
+	defer close(ch)
+
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		glog.Errorf("Error creating file watcher: %s", err)
+		return
+	}
+	fd.watcher = watcher
+
+	fd.refresh(ch)
+
+	ticker := time.NewTicker(fd.interval)
+	defer ticker.Stop()
+
+	for {
+		// Stopping has priority over refreshing. Thus we wrap the actual select
+		// clause to always catch done signals.
+		select {
+		case <-fd.done:
+			return
+		default:
+			select {
+			case event := <-fd.watcher.Events:
+				// fsnotify sometimes sends a bunch of events without name or operation.
+				// It's unclear what they are and why they are sent - filter them out.
+				if len(event.Name) == 0 {
+					break
+				}
+				// Everything but a chmod requires rereading.
+				if event.Op^fsnotify.Chmod == 0 {
+					break
+				}
+				// Changes to a file can spawn various sequences of events with
+				// different combinations of operations. For all practical purposes
+				// this is unpredictable.
+				// The most reliable solution is to reload everything if anything happens.
+				fd.refresh(ch)
+
+			case <-ticker.C:
+				// Setting a new watch after an update might fail. Make sure we don't lose
+				// those files forever.
+				fd.refresh(ch)
+
+			case err := <-fd.watcher.Errors:
+				if err != nil {
+					glog.Errorf("Error on file watch: %s", err)
+				}
+
+			case <-fd.done:
+				return
+			}
+		}
+	}
+}
+
+// refresh reads all files matching the discovery's patterns and sends the respective
+// updated target groups through the channel.
+func (fd *FileDiscovery) refresh(ch chan<- *config.TargetGroup) {
+	ref := map[string]int{}
+	for _, p := range fd.listFiles() {
+		tgroups, err := readFile(p)
+		if err != nil {
+			glog.Errorf("Error reading file %q: %s", p, err)
+			// Prevent deletion down below.
+			ref[p] = fd.lastRefresh[p]
+			continue
+		}
+		for _, tg := range tgroups {
+			ch <- tg
+		}
+		ref[p] = len(tgroups)
+	}
+	// Send empty updates for sources that disappeared.
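+	// A file that vanished, or that now contains fewer groups than before,
+	// leaves stale sources behind; sending an empty group per stale source
+	// tells the consumer to drop its targets.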
+	for f, n := range fd.lastRefresh {
+		m, ok := ref[f]
+		if !ok || n > m {
+			for i := m; i < n; i++ {
+				ch <- &config.TargetGroup{Source: fileSource(f, i)}
+			}
+		}
+	}
+	fd.lastRefresh = ref
+
+	fd.watchFiles()
+}
+
+// fileSource returns a source ID for the i-th target group in the file.
+func fileSource(filename string, i int) string {
+	return fmt.Sprintf("file:%s:%d", filename, i)
+}
+
+// Stop implements the TargetProvider interface.
+func (fd *FileDiscovery) Stop() {
+	glog.V(1).Infof("Stopping file discovery for %s...", fd.paths)
+
+	fd.done <- struct{}{}
+	// Closing the watcher will deadlock unless all events and errors are drained.
+	go func() {
+		for {
+			select {
+			case <-fd.watcher.Errors:
+			case <-fd.watcher.Events:
+				// Drain all events and errors.
+			case <-fd.done:
+				return
+			}
+		}
+	}()
+	fd.watcher.Close()
+
+	fd.done <- struct{}{}
+
+	glog.V(1).Infof("File discovery for %s stopped.", fd.paths)
+}
+
+// readFile reads a JSON or YAML list of target groups from the file, depending on its
+// file extension. It returns full configuration target groups.
+func readFile(filename string) ([]*config.TargetGroup, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var targetGroups []*config.TargetGroup
+
+	switch ext := filepath.Ext(filename); strings.ToLower(ext) {
+	case ".json":
+		if err := json.Unmarshal(content, &targetGroups); err != nil {
+			return nil, err
+		}
+	case ".yml", ".yaml":
+		if err := yaml.Unmarshal(content, &targetGroups); err != nil {
+			return nil, err
+		}
+	default:
+		panic(fmt.Errorf("retrieval.FileDiscovery.readFile: unhandled file extension %q", ext))
+	}
+
+	for i, tg := range targetGroups {
+		tg.Source = fileSource(filename, i)
+	}
+	return targetGroups, nil
+}
diff --git a/retrieval/discovery/file_test.go b/retrieval/discovery/file_test.go
new file mode 100644
index 000000000..ca020dc98
--- /dev/null
+++ b/retrieval/discovery/file_test.go
@@ -0,0 +1,93 @@
+package discovery
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/prometheus/prometheus/config"
+)
+
+func TestFileSD(t *testing.T) {
+	testFileSD(t, ".yml")
+	testFileSD(t, ".json")
+	os.Remove("fixtures/_test.yml")
+	os.Remove("fixtures/_test.json")
+}
+
+func testFileSD(t *testing.T, ext string) {
+	// As interval refreshing is more of a fallback, we only want to test
+	// whether file watches work as expected.
+	fsd := NewFileDiscovery([]string{"fixtures/_*" + ext}, 1*time.Hour)
+
+	ch := make(chan *config.TargetGroup)
+	go fsd.Run(ch)
+	defer fsd.Stop()
+
+	select {
+	case <-time.After(25 * time.Millisecond):
+		// Expected.
+	case tg := <-ch:
+		t.Fatalf("Unexpected target group in file discovery: %s", tg)
+	}
+
+	newf, err := os.Create("fixtures/_test" + ext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer newf.Close()
+
+	f, err := os.Open("fixtures/target_groups" + ext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
+	_, err = io.Copy(newf, f)
+	if err != nil {
+		t.Fatal(err)
+	}
+	newf.Close()
+
+	// The files contain two target groups which are read and sent in order.
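+	// Their sources are file:<path>:0 and file:<path>:1, as produced by
+	// fileSource in file.go.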
+	select {
+	case <-time.After(15 * time.Second):
+		t.Fatalf("Expected new target group but got none")
+	case tg := <-ch:
+		if tg.String() != fmt.Sprintf("file:fixtures/_test%s:0", ext) {
+			t.Fatalf("Unexpected target group %s", tg)
+		}
+	}
+	select {
+	case <-time.After(15 * time.Second):
+		t.Fatalf("Expected new target group but got none")
+	case tg := <-ch:
+		if tg.String() != fmt.Sprintf("file:fixtures/_test%s:1", ext) {
+			t.Fatalf("Unexpected target group %s", tg)
+		}
+	}
+	// Based on unknown circumstances, sometimes fsnotify will trigger more events in
+	// some runs (which might be empty, chains of different operations etc.).
+	// We have to drain those (as the target manager would) to avoid deadlocking and must
+	// not try to make sense of it all...
+	go func() {
+		for tg := range ch {
+			// Below we will change the file to a bad syntax. Previously extracted target
+			// groups must not be deleted via sending an empty target group.
+			// Note: t.Fatalf must not be called from a goroutine other than the
+			// test's own, so report via t.Errorf here.
+			if len(tg.Targets) == 0 {
+				t.Errorf("Unexpected empty target group received: %s", tg)
+			}
+		}
+	}()
+
+	newf, err = os.Create("fixtures/_test" + ext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := newf.Write([]byte("]gibberish\n][")); err != nil {
+		t.Fatal(err)
+	}
+	newf.Close()
+}
diff --git a/retrieval/discovery/fixtures/target_groups.json b/retrieval/discovery/fixtures/target_groups.json
new file mode 100644
index 000000000..df4f0df19
--- /dev/null
+++ b/retrieval/discovery/fixtures/target_groups.json
@@ -0,0 +1,11 @@
+[
+  {
+    "targets": ["localhost:9090", "example.org:443"],
+    "labels": {
+      "foo": "bar"
+    }
+  },
+  {
+    "targets": ["my.domain"]
+  }
+]
diff --git a/retrieval/discovery/fixtures/target_groups.yml b/retrieval/discovery/fixtures/target_groups.yml
new file mode 100644
index 000000000..a4823b2b9
--- /dev/null
+++ b/retrieval/discovery/fixtures/target_groups.yml
@@ -0,0 +1,5 @@
+- targets: ['localhost:9090', 'example.org:443']
+  labels:
+    test: success
+
+- targets: ['my.domain']
diff --git a/retrieval/target.go b/retrieval/target.go
index 2f40ec132..f0342bae2 100644
--- a/retrieval/target.go
+++ b/retrieval/target.go
@@ -213,8 +213,6 @@ func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSe
 }
 
 func (t *target) String() string {
-	t.RLock()
-	defer t.RUnlock()
 	return t.url.Host
 }
 
@@ -402,22 +400,6 @@ func (t *target) URL() string {
 
 // InstanceIdentifier implements Target.
 func (t *target) InstanceIdentifier() string {
-	// If we are given a port in the host port, use that.
-	if strings.Contains(t.url.Host, ":") {
-		return t.url.Host
-	}
-
-	t.RLock()
-	defer t.RUnlock()
-
-	// Otherwise, deduce port based on protocol.
- if t.url.Scheme == "http" { - return fmt.Sprintf("%s:80", t.url.Host) - } else if t.url.Scheme == "https" { - return fmt.Sprintf("%s:443", t.url.Host) - } - - glog.Warningf("Unknown scheme %s when generating identifier, using host without port number.", t.url.Scheme) return t.url.Host } diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 062d27568..857bd7dd8 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -34,7 +34,7 @@ func TestTargetInterface(t *testing.T) { } func TestBaseLabels(t *testing.T) { - target := newTestTarget("example.com", 0, clientmodel.LabelSet{"job": "some_job", "foo": "bar"}) + target := newTestTarget("example.com:80", 0, clientmodel.LabelSet{"job": "some_job", "foo": "bar"}) want := clientmodel.LabelSet{ clientmodel.JobLabel: "some_job", clientmodel.InstanceLabel: "example.com:80", @@ -89,7 +89,7 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { } func TestTargetRecordScrapeHealth(t *testing.T) { - testTarget := newTestTarget("example.url", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}) + testTarget := newTestTarget("example.url:80", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"}) now := clientmodel.Now() appender := &collectResultAppender{} diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 43d9d165a..8c89e1089 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -294,6 +294,19 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc targets := make([]Target, 0, len(tg.Targets)) for i, labels := range tg.Targets { + addr := string(labels[clientmodel.AddressLabel]) + // If no port was provided, infer it based on the used scheme. + if !strings.Contains(addr, ":") { + switch cfg.Scheme { + case "http": + addr = fmt.Sprintf("%s:80", addr) + case "https": + addr = fmt.Sprintf("%s:443", addr) + default: + panic(fmt.Errorf("targetsFromGroup: invalid scheme %q", cfg.Scheme)) + } + labels[clientmodel.AddressLabel] = clientmodel.LabelValue(addr) + } // Copy labels into the labelset for the target if they are not // set already. Apply the labelsets in order of decreasing precedence. labelsets := []clientmodel.LabelSet{ @@ -347,6 +360,10 @@ func ProvidersFromConfig(cfg *config.ScrapeConfig) []TargetProvider { dnsSD := discovery.NewDNSDiscovery(dnscfg.Names, time.Duration(dnscfg.RefreshInterval)) providers = append(providers, dnsSD) } + for _, filecfg := range cfg.FileSDConfigs { + fileSD := discovery.NewFileDiscovery(filecfg.Names, time.Duration(filecfg.RefreshInterval)) + providers = append(providers, fileSD) + } if len(cfg.TargetGroups) > 0 { providers = append(providers, NewStaticProvider(cfg.TargetGroups)) } From 9ca47869ed54832228e6891751b5b950217d63e2 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Fri, 15 May 2015 14:54:29 +0200 Subject: [PATCH 15/27] Provide full SD configs to discovery constructors. Some SD configs may have many options. To be readable and consistent, make all discovery constructors receive the full config rather than the separate arguments. 
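To illustrate the effect at a call site (a sketch only, not part of the diffs
below; dnscfg stands for one entry of cfg.DNSSDConfigs in ProvidersFromConfig):

```go
// Before: every new option extends the constructor signature and has
// to be threaded through at each call site.
dnsSD := discovery.NewDNSDiscovery(dnscfg.Names, time.Duration(dnscfg.RefreshInterval))

// After: the constructor receives the whole SD config, so adding an
// option only touches the constructor itself.
dnsSD := discovery.NewDNSDiscovery(dnscfg)
```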
--- retrieval/discovery/dns.go | 6 +++--- retrieval/discovery/file.go | 9 ++++----- retrieval/discovery/file_test.go | 6 +++++- retrieval/targetmanager.go | 11 ++++------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/retrieval/discovery/dns.go b/retrieval/discovery/dns.go index 017ea476a..5b4206cb5 100644 --- a/retrieval/discovery/dns.go +++ b/retrieval/discovery/dns.go @@ -70,11 +70,11 @@ type DNSDiscovery struct { } // NewDNSDiscovery returns a new DNSDiscovery which periodically refreshes its targets. -func NewDNSDiscovery(names []string, refreshInterval time.Duration) *DNSDiscovery { +func NewDNSDiscovery(conf *config.DNSSDConfig) *DNSDiscovery { return &DNSDiscovery{ - names: names, + names: conf.Names, done: make(chan struct{}), - ticker: time.NewTicker(refreshInterval), + ticker: time.NewTicker(time.Duration(conf.RefreshInterval)), } } diff --git a/retrieval/discovery/file.go b/retrieval/discovery/file.go index cda4cbc1a..7e16b5f66 100644 --- a/retrieval/discovery/file.go +++ b/retrieval/discovery/file.go @@ -44,13 +44,12 @@ type FileDiscovery struct { } // NewFileDiscovery returns a new file discovery for the given paths. -func NewFileDiscovery(paths []string, interval time.Duration) *FileDiscovery { - fd := &FileDiscovery{ - paths: paths, - interval: interval, +func NewFileDiscovery(conf *config.FileSDConfig) *FileDiscovery { + return &FileDiscovery{ + paths: conf.Names, + interval: time.Duration(conf.RefreshInterval), done: make(chan struct{}), } - return fd } // Sources implements the TargetProvider interface. diff --git a/retrieval/discovery/file_test.go b/retrieval/discovery/file_test.go index ca020dc98..5b9f90fcc 100644 --- a/retrieval/discovery/file_test.go +++ b/retrieval/discovery/file_test.go @@ -20,7 +20,11 @@ func TestFileSD(t *testing.T) { func testFileSD(t *testing.T, ext string) { // As interval refreshing is more of a fallback, we only want to test // whether file watches work as expected. 
- fsd := NewFileDiscovery([]string{"fixtures/_*" + ext}, 1*time.Hour) + var conf config.FileSDConfig + conf.Names = []string{"fixtures/_*" + ext} + conf.RefreshInterval = config.Duration(1 * time.Hour) + + fsd := NewFileDiscovery(&conf) ch := make(chan *config.TargetGroup) go fsd.Run(ch) diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 8c89e1089..cacc2126e 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -17,7 +17,6 @@ import ( "fmt" "strings" "sync" - "time" "github.com/golang/glog" @@ -356,13 +355,11 @@ func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.Sc func ProvidersFromConfig(cfg *config.ScrapeConfig) []TargetProvider { var providers []TargetProvider - for _, dnscfg := range cfg.DNSSDConfigs { - dnsSD := discovery.NewDNSDiscovery(dnscfg.Names, time.Duration(dnscfg.RefreshInterval)) - providers = append(providers, dnsSD) + for _, c := range cfg.DNSSDConfigs { + providers = append(providers, discovery.NewDNSDiscovery(c)) } - for _, filecfg := range cfg.FileSDConfigs { - fileSD := discovery.NewFileDiscovery(filecfg.Names, time.Duration(filecfg.RefreshInterval)) - providers = append(providers, fileSD) + for _, c := range cfg.FileSDConfigs { + providers = append(providers, discovery.NewFileDiscovery(c)) } if len(cfg.TargetGroups) > 0 { providers = append(providers, NewStaticProvider(cfg.TargetGroups)) From dbc08d390e38fefc98e8be84b7088e5b7a4fc81f Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Mon, 18 May 2015 11:13:13 +0200 Subject: [PATCH 16/27] Move target status data into its own object --- retrieval/target.go | 126 +++++++++++++++++++++----------------- retrieval/target_test.go | 25 ++++---- web/templates/status.html | 10 +-- 3 files changed, 90 insertions(+), 71 deletions(-) diff --git a/retrieval/target.go b/retrieval/target.go index f0342bae2..8ad2d8f0c 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -110,12 +110,8 @@ const ( type Target interface { extraction.Ingester - // Return the last encountered scrape error, if any. - LastError() error - // Return the health of the target. - State() TargetState - // Return the last time a scrape was attempted. - LastScrape() time.Time + // Status returns the current status of the target. + Status() *TargetStatus // The URL to which the Target corresponds. Out of all of the available // points in this interface, this one is the best candidate to change given // the ways to express the endpoint. @@ -141,6 +137,53 @@ type Target interface { Update(*config.ScrapeConfig, clientmodel.LabelSet) } +// TargetStatus contains information about the current status of a scrape target. +type TargetStatus struct { + lastError error + lastScrape time.Time + state TargetState + + mu sync.RWMutex +} + +// LastError returns the error encountered during the last scrape. +func (ts *TargetStatus) LastError() error { + ts.mu.RLock() + defer ts.mu.RUnlock() + return ts.lastError +} + +// LastScrape returns the time of the last scrape. +func (ts *TargetStatus) LastScrape() time.Time { + ts.mu.RLock() + defer ts.mu.RUnlock() + return ts.lastScrape +} + +// State returns the last known health state of the target. 
+func (ts *TargetStatus) State() TargetState { + ts.mu.RLock() + defer ts.mu.RUnlock() + return ts.state +} + +func (ts *TargetStatus) setLastScrape(t time.Time) { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.lastScrape = t +} + +func (ts *TargetStatus) setLastError(err error) { + ts.mu.Lock() + defer ts.mu.Unlock() + if err == nil { + ts.state = Healthy + } else { + ts.state = Unhealthy + } + ts.lastError = err +} + // target is a Target that refers to a singular HTTP or HTTPS endpoint. type target struct { // Closing scraperStopping signals that scraping should stop. @@ -150,6 +193,9 @@ type target struct { // Channel to buffer ingested samples. ingestedSamples chan clientmodel.Samples + // The status object for the target. It is only set once on initialization. + status *TargetStatus + // The HTTP client used to scrape the target's endpoint. httpClient *http.Client @@ -159,12 +205,6 @@ type target struct { url *url.URL // Any base labels that are added to this target and its metrics. baseLabels clientmodel.LabelSet - // The current health state of the target. - state TargetState - // The last encountered scrape error, if any. - lastError error - // The last time a scrape was attempted. - lastScrape time.Time // What is the deadline for the HTTP or HTTPS against this endpoint. deadline time.Duration // The time between two scrapes. @@ -177,6 +217,7 @@ func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target url: &url.URL{ Host: string(baseLabels[clientmodel.AddressLabel]), }, + status: &TargetStatus{}, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), } @@ -184,6 +225,11 @@ func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target return t } +// Status implements the Target interface. +func (t *target) Status() *TargetStatus { + return t.status +} + // Update overwrites settings in the target that are derived from the job config // it belongs to. func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) { @@ -256,9 +302,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { ticker := time.NewTicker(lastScrapeInterval) defer ticker.Stop() - t.Lock() // Writing t.lastScrape requires the lock. - t.lastScrape = time.Now() - t.Unlock() + t.status.setLastScrape(time.Now()) t.scrape(sampleAppender) // Explanation of the contraption below: @@ -277,12 +321,12 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { case <-t.scraperStopping: return case <-ticker.C: - t.Lock() - took := time.Since(t.lastScrape) - t.lastScrape = time.Now() + took := time.Since(t.status.LastScrape()) + t.status.setLastScrape(time.Now()) intervalStr := lastScrapeInterval.String() + t.Lock() // On changed scrape interval the new interval becomes effective // after the next scrape. if lastScrapeInterval != t.scrapeInterval { @@ -315,21 +359,14 @@ const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { t.RLock() - timestamp := clientmodel.Now() + start := time.Now() - defer func(start time.Time) { - t.recordScrapeHealth(sampleAppender, timestamp, err == nil, time.Since(start)) + defer func() { t.RUnlock() - t.Lock() // Writing t.state and t.lastError requires the lock. 
- if err == nil { - t.state = Healthy - } else { - t.state = Unhealthy - } - t.lastError = err - t.Unlock() - }(time.Now()) + t.status.setLastError(err) + t.recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), time.Since(start)) + }() req, err := http.NewRequest("GET", t.url.String(), nil) if err != nil { @@ -354,7 +391,7 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { t.ingestedSamples = make(chan clientmodel.Samples, ingestedSamplesCap) processOptions := &extraction.ProcessOptions{ - Timestamp: timestamp, + Timestamp: clientmodel.TimestampFromTime(start), } go func() { err = processor.ProcessSingle(resp.Body, t, processOptions) @@ -370,27 +407,6 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { return err } -// LastError implements Target. -func (t *target) LastError() error { - t.RLock() - defer t.RUnlock() - return t.lastError -} - -// State implements Target. -func (t *target) State() TargetState { - t.RLock() - defer t.RUnlock() - return t.state -} - -// LastScrape implements Target. -func (t *target) LastScrape() time.Time { - t.RLock() - defer t.RUnlock() - return t.lastScrape -} - // URL implements Target. func (t *target) URL() string { t.RLock() @@ -454,10 +470,10 @@ func (t *target) BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet { return ls } -func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, healthy bool, scrapeDuration time.Duration) { +func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, scrapeDuration time.Duration) { healthMetric := clientmodel.Metric{} durationMetric := clientmodel.Metric{} - for label, value := range t.baseLabels { + for label, value := range t.BaseLabels() { healthMetric[label] = value durationMetric[label] = value } @@ -465,7 +481,7 @@ func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, times durationMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeDurationMetricName) healthValue := clientmodel.SampleValue(0) - if healthy { + if t.status.State() == Healthy { healthValue = clientmodel.SampleValue(1) } diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 857bd7dd8..7d2c1327d 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -57,8 +57,8 @@ func TestTargetScrapeUpdatesState(t *testing.T) { testTarget := newTestTarget("bad schema", 0, nil) testTarget.scrape(nopAppender{}) - if testTarget.state != Unhealthy { - t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.state) + if testTarget.status.State() != Unhealthy { + t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.status.State()) } } @@ -80,11 +80,11 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{"dings": "bums"}) testTarget.scrape(slowAppender{}) - if testTarget.state != Unhealthy { - t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.state) + if testTarget.status.State() != Unhealthy { + t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.status.State()) } - if testTarget.lastError != errIngestChannelFull { - t.Errorf("Expected target error %q, actual: %q", errIngestChannelFull, testTarget.lastError) + if testTarget.status.LastError() != errIngestChannelFull { + t.Errorf("Expected target error %q, actual: %q", errIngestChannelFull, 
testTarget.status.LastError())
 	}
 }
 
@@ -93,7 +93,7 @@ func TestTargetRecordScrapeHealth(t *testing.T) {
 	now := clientmodel.Now()
 	appender := &collectResultAppender{}
-	testTarget.recordScrapeHealth(appender, now, true, 2*time.Second)
+	testTarget.recordScrapeHealth(appender, now, 2*time.Second)
 
 	result := appender.result
 
@@ -205,17 +205,17 @@ func TestTargetRunScraperScrapes(t *testing.T) {
 
 	// Enough time for a scrape to happen.
 	time.Sleep(2 * time.Millisecond)
-	if testTarget.lastScrape.IsZero() {
+	if testTarget.status.LastScrape().IsZero() {
 		t.Errorf("Scrape hasn't occurred.")
 	}
 
 	testTarget.StopScraper()
 	// Wait for it to take effect.
 	time.Sleep(2 * time.Millisecond)
-	last := testTarget.lastScrape
+	last := testTarget.status.LastScrape()
 	// Enough time for a scrape to happen.
 	time.Sleep(2 * time.Millisecond)
-	if testTarget.lastScrape != last {
+	if testTarget.status.LastScrape() != last {
 		t.Errorf("Scrape occurred after it was stopped.")
 	}
 }
@@ -249,7 +249,10 @@ func newTestTarget(targetURL string, deadline time.Duration, baseLabels clientmo
 			Host: strings.TrimLeft(targetURL, "http://"),
 			Path: "/metrics",
 		},
-		deadline: deadline,
+		deadline: deadline,
+		status: &TargetStatus{
+			state: Healthy,
+		},
 		scrapeInterval:  1 * time.Millisecond,
 		httpClient:      utility.NewDeadlineClient(deadline),
 		scraperStopping: make(chan struct{}),
diff --git a/web/templates/status.html b/web/templates/status.html
index c6e2de472..900470978 100644
--- a/web/templates/status.html
+++ b/web/templates/status.html
@@ -51,19 +51,19 @@
 
         {{.URL}}
 
-          {{.State}}
+          {{.Status.State}}
 
         {{.BaseLabelsWithoutJobAndInstance}}
 
-        {{if .LastScrape.IsZero}}Never{{else}}{{since .LastScrape}} ago{{end}}
+        {{if .Status.LastScrape.IsZero}}Never{{else}}{{since .Status.LastScrape}} ago{{end}}
 
-        {{if .LastError}}
-          {{.LastError}}
+        {{if .Status.LastError}}
+          {{.Status.LastError}}
 	{{end}}
-        {{.URL}}
+        {{.URL}}
@@ -56,7 +56,7 @@
 
-        {{.BaseLabelsWithoutJobAndInstance}}
+        {{stripLabels .BaseLabels "job" "instance"}}
 
         {{if .Status.LastScrape.IsZero}}Never{{else}}{{since .Status.LastScrape}} ago{{end}}
 
diff --git a/web/web.go b/web/web.go
index bd53b0de5..1548046c3 100644
--- a/web/web.go
+++ b/web/web.go
@@ -29,10 +29,14 @@ import (
 	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 
+	clientmodel "github.com/prometheus/client_golang/model"
+
 	"github.com/prometheus/prometheus/web/api"
 	"github.com/prometheus/prometheus/web/blob"
 )
 
+var localhostRepresentations = []string{"127.0.0.1", "localhost"}
+
 // Commandline flags.
 var (
 	listenAddress = flag.String("web.listen-address", ":9090", "Address to listen on for the web interface, API, and telemetry.")
@@ -150,28 +154,53 @@ func getConsoles(pathPrefix string) string {
 	return ""
 }
 
-func getTemplate(name string, pathPrefix string) (t *template.Template, err error) {
-	t = template.New("_base")
+func getTemplate(name string, pathPrefix string) (*template.Template, error) {
+	t := template.New("_base")
+	var err error
 
 	t.Funcs(template.FuncMap{
 		"since":       time.Since,
 		"getConsoles": func() string { return getConsoles(pathPrefix) },
 		"pathPrefix":  func() string { return pathPrefix },
+		"stripLabels": func(lset clientmodel.LabelSet, labels ...clientmodel.LabelName) clientmodel.LabelSet {
+			for _, ln := range labels {
+				delete(lset, ln)
+			}
+			return lset
+		},
+		"globalURL": func(url string) string {
+			hostname, err := os.Hostname()
+			if err != nil {
+				glog.Warningf("Couldn't get hostname: %s, returning target.URL()", err)
+				return url
+			}
+			for _, localhostRepresentation := range localhostRepresentations {
+				url = strings.Replace(url, "//"+localhostRepresentation, "//"+hostname, 1)
+			}
+			return url
+		},
 	})
+
 	file, err := getTemplateFile("_base")
 	if err != nil {
-		glog.Error("Could not read base template: ", err)
+		glog.Errorln("Could not read base template:", err)
 		return nil, err
 	}
-	t.Parse(file)
+	t, err = t.Parse(file)
+	if err != nil {
+		glog.Errorln("Could not parse base template:", err)
+	}
 
 	file, err = getTemplateFile(name)
 	if err != nil {
-		glog.Error("Could not read base template: ", err)
+		glog.Error("Could not read template %d: ", name, err)
 		return nil, err
 	}
-	t.Parse(file)
-	return
+	t, err = t.Parse(file)
+	if err != nil {
+		glog.Errorf("Could not parse template %s: %s", name, err)
+	}
+	return t, err
 }
 
 func executeTemplate(w http.ResponseWriter, name string, data interface{}, pathPrefix string) {
From 385919a65a284c340fa471a75f23a6746555199d Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Mon, 18 May 2015 13:14:41 +0200
Subject: [PATCH 18/27] Avoid inter-component blocking if ingestion/scraping
 blocks.

Appending to the storage can block for a long time. Timing out scrapes
can also cause longer blocks. This commit prevents those blocks from
affecting components other than the target itself. Also the Target
interface was removed.
---
 retrieval/target.go             | 159 +++++++++++++------------------
 retrieval/target_test.go        |  39 ++++----
 retrieval/targetmanager.go      |  20 ++--
 retrieval/targetmanager_test.go |   2 +-
 web/status.go                   |  14 +--
 web/templates/status.html       |   3 +-
 web/web.go                      |   2 +-
 7 files changed, 102 insertions(+), 137 deletions(-)

diff --git a/retrieval/target.go b/retrieval/target.go
index aae723789..4600cefb6 100644
--- a/retrieval/target.go
+++ b/retrieval/target.go
@@ -67,72 +67,35 @@ func init() {
 	prometheus.MustRegister(targetIntervalLength)
 }
 
-// TargetState describes the state of a Target.
-type TargetState int
+// TargetHealth describes the health state of a target.
+type TargetHealth int
 
-func (t TargetState) String() string {
+func (t TargetHealth) String() string {
 	switch t {
-	case Unknown:
+	case HealthUnknown:
 		return "UNKNOWN"
-	case Healthy:
+	case HealthGood:
 		return "HEALTHY"
-	case Unhealthy:
+	case HealthBad:
 		return "UNHEALTHY"
 	}
 	panic("unknown state")
 }
 
 const (
-	// Unknown is the state of a Target before it is first scraped.
-	Unknown TargetState = iota
-	// Healthy is the state of a Target that has been successfully scraped.
-	Healthy
-	// Unhealthy is the state of a Target that was scraped unsuccessfully.
-	Unhealthy
+	// HealthUnknown is the state of a Target before it is first scraped.
+	HealthUnknown TargetHealth = iota
+	// HealthGood is the state of a Target that has been successfully scraped.
+	HealthGood
+	// HealthBad is the state of a Target that was scraped unsuccessfully.
+	HealthBad
 )
 
-// A Target represents an endpoint that should be interrogated for metrics.
-//
-// The protocol described by this type will likely change in future iterations,
-// as it offers no good support for aggregated targets and fan out. Thusly,
-// it is likely that the current Target and target uses will be
-// wrapped with some resolver type.
-//
-// For the future, the Target protocol will abstract away the exact means that
-// metrics are retrieved and deserialized from the given instance to which it
-// refers.
-//
-// Target implements extraction.Ingester.
-type Target interface {
-	extraction.Ingester
-
-	// Status returns the current status of the target.
-	Status() *TargetStatus
-	// The URL to which the Target corresponds. Out of all of the available
-	// points in this interface, this one is the best candidate to change given
-	// the ways to express the endpoint.
-	URL() string
-	// Used to populate the `instance` label in metrics.
-	InstanceIdentifier() string
-	// Return the labels describing the targets. These are the base labels
-	// as well as internal labels.
-	fullLabels() clientmodel.LabelSet
-	// Return the target's base labels.
-	BaseLabels() clientmodel.LabelSet
-	// Start scraping the target in regular intervals.
-	RunScraper(storage.SampleAppender)
-	// Stop scraping, synchronous.
-	StopScraper()
-	// Update the target's state.
-	Update(*config.ScrapeConfig, clientmodel.LabelSet)
-}
-
 // TargetStatus contains information about the current status of a scrape target.
 type TargetStatus struct {
 	lastError  error
 	lastScrape time.Time
-	state      TargetState
+	health     TargetHealth
 
 	mu sync.RWMutex
 }
@@ -151,11 +114,11 @@ func (ts *TargetStatus) LastScrape() time.Time {
 	return ts.lastScrape
 }
 
-// State returns the last known health state of the target.
-func (ts *TargetStatus) State() TargetState {
+// Health returns the last known health state of the target.
+func (ts *TargetStatus) Health() TargetHealth {
 	ts.mu.RLock()
 	defer ts.mu.RUnlock()
-	return ts.state
+	return ts.health
 }
 
 func (ts *TargetStatus) setLastScrape(t time.Time) {
@@ -168,15 +131,20 @@ func (ts *TargetStatus) setLastError(err error) {
 	ts.mu.Lock()
 	defer ts.mu.Unlock()
 	if err == nil {
-		ts.state = Healthy
+		ts.health = HealthGood
 	} else {
-		ts.state = Unhealthy
+		ts.health = HealthBad
 	}
 	ts.lastError = err
}
 
-// target is a Target that refers to a singular HTTP or HTTPS endpoint.
-type target struct {
+// Target refers to a singular HTTP or HTTPS endpoint.
+type Target struct {
+	// The status object for the target. It is only set once on initialization.
+	status *TargetStatus
+	// The HTTP client used to scrape the target's endpoint.
+	httpClient *http.Client
+
 	// Closing scraperStopping signals that scraping should stop.
 	scraperStopping chan struct{}
 	// Closing scraperStopped signals that scraping has been stopped.
@@ -184,14 +152,9 @@ type target struct { // Channel to buffer ingested samples. ingestedSamples chan clientmodel.Samples - // The status object for the target. It is only set once on initialization. - status *TargetStatus - // The HTTP client used to scrape the target's endpoint. - httpClient *http.Client - // Mutex protects the members below. sync.RWMutex - + // url is the URL to be scraped. Its host is immutable. url *url.URL // Any base labels that are added to this target and its metrics. baseLabels clientmodel.LabelSet @@ -202,8 +165,8 @@ type target struct { } // NewTarget creates a reasonably configured target for querying. -func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target { - t := &target{ +func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) *Target { + t := &Target{ url: &url.URL{ Host: string(baseLabels[clientmodel.AddressLabel]), }, @@ -215,14 +178,14 @@ func NewTarget(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) Target return t } -// Status implements the Target interface. -func (t *target) Status() *TargetStatus { +// Status returns the status of the target. +func (t *Target) Status() *TargetStatus { return t.status } // Update overwrites settings in the target that are derived from the job config // it belongs to. -func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) { +func (t *Target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSet) { t.Lock() defer t.Unlock() @@ -248,12 +211,15 @@ func (t *target) Update(cfg *config.ScrapeConfig, baseLabels clientmodel.LabelSe } } -func (t *target) String() string { +func (t *Target) String() string { return t.url.Host } -// Ingest implements Target and extraction.Ingester. -func (t *target) Ingest(s clientmodel.Samples) error { +// Ingest implements an extraction.Ingester. +func (t *Target) Ingest(s clientmodel.Samples) error { + t.RLock() + deadline := t.deadline + t.RUnlock() // Since the regular case is that ingestedSamples is ready to receive, // first try without setting a timeout so that we don't need to allocate // a timer most of the time. @@ -264,14 +230,17 @@ func (t *target) Ingest(s clientmodel.Samples) error { select { case t.ingestedSamples <- s: return nil - case <-time.After(t.deadline / 10): + case <-time.After(deadline / 10): return errIngestChannelFull } } } +// Ensure that Target implements extraction.Ingester at compile time. +var _ extraction.Ingester = (*Target)(nil) + // RunScraper implements Target. -func (t *target) RunScraper(sampleAppender storage.SampleAppender) { +func (t *Target) RunScraper(sampleAppender storage.SampleAppender) { defer close(t.scraperStopped) t.RLock() @@ -316,7 +285,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { intervalStr := lastScrapeInterval.String() - t.Lock() + t.RLock() // On changed scrape interval the new interval becomes effective // after the next scrape. if lastScrapeInterval != t.scrapeInterval { @@ -324,7 +293,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { ticker = time.NewTicker(t.scrapeInterval) lastScrapeInterval = t.scrapeInterval } - t.Unlock() + t.RUnlock() targetIntervalLength.WithLabelValues(intervalStr).Observe( float64(took) / float64(time.Second), // Sub-second precision. @@ -336,7 +305,7 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender) { } // StopScraper implements Target. 
-func (t *target) StopScraper() { +func (t *Target) StopScraper() { glog.V(1).Infof("Stopping scraper for target %v...", t) close(t.scraperStopping) @@ -347,18 +316,16 @@ func (t *target) StopScraper() { const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1` -func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { - t.RLock() +func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) { start := time.Now() + baseLabels := t.BaseLabels() defer func() { - t.RUnlock() - t.status.setLastError(err) - t.recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), time.Since(start)) + recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), baseLabels, t.status.Health(), time.Since(start)) }() - req, err := http.NewRequest("GET", t.url.String(), nil) + req, err := http.NewRequest("GET", t.URL(), nil) if err != nil { panic(err) } @@ -390,7 +357,7 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { for samples := range t.ingestedSamples { for _, s := range samples { - s.Metric.MergeFromLabelSet(t.baseLabels, clientmodel.ExporterLabelPrefix) + s.Metric.MergeFromLabelSet(baseLabels, clientmodel.ExporterLabelPrefix) sampleAppender.Append(s) } } @@ -398,19 +365,19 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) { } // URL implements Target. -func (t *target) URL() string { +func (t *Target) URL() string { t.RLock() defer t.RUnlock() return t.url.String() } -// InstanceIdentifier implements Target. -func (t *target) InstanceIdentifier() string { +// InstanceIdentifier returns the identifier for the target. +func (t *Target) InstanceIdentifier() string { return t.url.Host } -// fullLabels implements Target. -func (t *target) fullLabels() clientmodel.LabelSet { +// fullLabels returns the base labels plus internal labels defining the target. +func (t *Target) fullLabels() clientmodel.LabelSet { t.RLock() defer t.RUnlock() lset := make(clientmodel.LabelSet, len(t.baseLabels)+2) @@ -422,8 +389,8 @@ func (t *target) fullLabels() clientmodel.LabelSet { return lset } -// BaseLabels implements Target. -func (t *target) BaseLabels() clientmodel.LabelSet { +// BaseLabels returns a copy of the target's base labels. 
+func (t *Target) BaseLabels() clientmodel.LabelSet { t.RLock() defer t.RUnlock() lset := make(clientmodel.LabelSet, len(t.baseLabels)) @@ -433,22 +400,26 @@ func (t *target) BaseLabels() clientmodel.LabelSet { return lset } -func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, scrapeDuration time.Duration) { - t.RLock() - healthMetric := make(clientmodel.Metric, len(t.baseLabels)+1) - durationMetric := make(clientmodel.Metric, len(t.baseLabels)+1) +func recordScrapeHealth( + sampleAppender storage.SampleAppender, + timestamp clientmodel.Timestamp, + baseLabels clientmodel.LabelSet, + health TargetHealth, + scrapeDuration time.Duration, +) { + healthMetric := make(clientmodel.Metric, len(baseLabels)+1) + durationMetric := make(clientmodel.Metric, len(baseLabels)+1) healthMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeHealthMetricName) durationMetric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(scrapeDurationMetricName) - for label, value := range t.baseLabels { + for label, value := range baseLabels { healthMetric[label] = value durationMetric[label] = value } - t.RUnlock() healthValue := clientmodel.SampleValue(0) - if t.status.State() == Healthy { + if health == HealthGood { healthValue = clientmodel.SampleValue(1) } diff --git a/retrieval/target_test.go b/retrieval/target_test.go index a70837791..6a411f86a 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -29,10 +29,6 @@ import ( "github.com/prometheus/prometheus/utility" ) -func TestTargetInterface(t *testing.T) { - var _ Target = &target{} -} - func TestBaseLabels(t *testing.T) { target := newTestTarget("example.com:80", 0, clientmodel.LabelSet{"job": "some_job", "foo": "bar"}) want := clientmodel.LabelSet{ @@ -50,8 +46,8 @@ func TestTargetScrapeUpdatesState(t *testing.T) { testTarget := newTestTarget("bad schema", 0, nil) testTarget.scrape(nopAppender{}) - if testTarget.status.State() != Unhealthy { - t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.status.State()) + if testTarget.status.Health() != HealthBad { + t.Errorf("Expected target state %v, actual: %v", HealthBad, testTarget.status.Health()) } } @@ -73,8 +69,8 @@ func TestTargetScrapeWithFullChannel(t *testing.T) { testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{"dings": "bums"}) testTarget.scrape(slowAppender{}) - if testTarget.status.State() != Unhealthy { - t.Errorf("Expected target state %v, actual: %v", Unhealthy, testTarget.status.State()) + if testTarget.status.Health() != HealthBad { + t.Errorf("Expected target state %v, actual: %v", HealthBad, testTarget.status.Health()) } if testTarget.status.LastError() != errIngestChannelFull { t.Errorf("Expected target error %q, actual: %q", errIngestChannelFull, testTarget.status.LastError()) @@ -86,7 +82,8 @@ func TestTargetRecordScrapeHealth(t *testing.T) { now := clientmodel.Now() appender := &collectResultAppender{} - testTarget.recordScrapeHealth(appender, now, 2*time.Second) + testTarget.status.setLastError(nil) + recordScrapeHealth(appender, now, testTarget.BaseLabels(), testTarget.status.Health(), 2*time.Second) result := appender.result @@ -138,13 +135,13 @@ func TestTargetScrapeTimeout(t *testing.T) { ) defer server.Close() - var testTarget Target = newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) + testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{}) appender := nopAppender{} // scrape once without timeout 
signal <- true - if err := testTarget.(*target).scrape(appender); err != nil { + if err := testTarget.scrape(appender); err != nil { t.Fatal(err) } @@ -153,12 +150,12 @@ func TestTargetScrapeTimeout(t *testing.T) { // now scrape again signal <- true - if err := testTarget.(*target).scrape(appender); err != nil { + if err := testTarget.scrape(appender); err != nil { t.Fatal(err) } // now timeout - if err := testTarget.(*target).scrape(appender); err == nil { + if err := testTarget.scrape(appender); err == nil { t.Fatal("expected scrape to timeout") } else { signal <- true // let handler continue @@ -166,7 +163,7 @@ func TestTargetScrapeTimeout(t *testing.T) { // now scrape again without timeout signal <- true - if err := testTarget.(*target).scrape(appender); err != nil { + if err := testTarget.scrape(appender); err != nil { t.Fatal(err) } } @@ -224,28 +221,26 @@ func BenchmarkScrape(b *testing.B) { ) defer server.Close() - var testTarget Target = newTestTarget(server.URL, 100*time.Millisecond, clientmodel.LabelSet{"dings": "bums"}) + testTarget := newTestTarget(server.URL, 100*time.Millisecond, clientmodel.LabelSet{"dings": "bums"}) appender := nopAppender{} b.ResetTimer() for i := 0; i < b.N; i++ { - if err := testTarget.(*target).scrape(appender); err != nil { + if err := testTarget.scrape(appender); err != nil { b.Fatal(err) } } } -func newTestTarget(targetURL string, deadline time.Duration, baseLabels clientmodel.LabelSet) *target { - t := &target{ +func newTestTarget(targetURL string, deadline time.Duration, baseLabels clientmodel.LabelSet) *Target { + t := &Target{ url: &url.URL{ Scheme: "http", Host: strings.TrimLeft(targetURL, "http://"), Path: "/metrics", }, - deadline: deadline, - status: &TargetStatus{ - state: Healthy, - }, + deadline: deadline, + status: &TargetStatus{}, scrapeInterval: 1 * time.Millisecond, httpClient: utility.NewDeadlineClient(deadline), scraperStopping: make(chan struct{}), diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 8b71790bc..7763a5604 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -56,7 +56,7 @@ type TargetManager struct { running bool // Targets by their source ID. - targets map[string][]Target + targets map[string][]*Target // Providers by the scrape configs they are derived from. providers map[*config.ScrapeConfig][]TargetProvider } @@ -65,7 +65,7 @@ type TargetManager struct { func NewTargetManager(sampleAppender storage.SampleAppender) *TargetManager { tm := &TargetManager{ sampleAppender: sampleAppender, - targets: make(map[string][]Target), + targets: make(map[string][]*Target), } return tm } @@ -165,7 +165,7 @@ func (tm *TargetManager) removeTargets(f func(string) bool) { } wg.Add(len(targets)) for _, target := range targets { - go func(t Target) { + go func(t *Target) { t.StopScraper() wg.Done() }(target) @@ -197,7 +197,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf // Replace the old targets with the new ones while keeping the state // of intersecting targets. for i, tnew := range newTargets { - var match Target + var match *Target for j, told := range oldTargets { if told == nil { continue @@ -214,7 +214,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf // Updating is blocked during a scrape. We don't want those wait times // to build up. 
wg.Add(1) - go func(t Target) { + go func(t *Target) { match.Update(cfg, t.fullLabels()) wg.Done() }(tnew) @@ -227,7 +227,7 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf for _, told := range oldTargets { if told != nil { wg.Add(1) - go func(t Target) { + go func(t *Target) { t.StopScraper() wg.Done() }(told) @@ -250,11 +250,11 @@ func (tm *TargetManager) updateTargetGroup(tgroup *config.TargetGroup, cfg *conf } // Pools returns the targets currently being scraped bucketed by their job name. -func (tm *TargetManager) Pools() map[string][]Target { +func (tm *TargetManager) Pools() map[string][]*Target { tm.m.RLock() defer tm.m.RUnlock() - pools := map[string][]Target{} + pools := map[string][]*Target{} for _, ts := range tm.targets { for _, t := range ts { @@ -287,11 +287,11 @@ func (tm *TargetManager) ApplyConfig(cfg *config.Config) { } // targetsFromGroup builds targets based on the given TargetGroup and config. -func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) ([]Target, error) { +func (tm *TargetManager) targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) ([]*Target, error) { tm.m.RLock() defer tm.m.RUnlock() - targets := make([]Target, 0, len(tg.Targets)) + targets := make([]*Target, 0, len(tg.Targets)) for i, labels := range tg.Targets { addr := string(labels[clientmodel.AddressLabel]) // If no port was provided, infer it based on the used scheme. diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index f6aea2a40..aaa251539 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -45,7 +45,7 @@ func TestTargetManagerChan(t *testing.T) { providers: map[*config.ScrapeConfig][]TargetProvider{ testJob1: []TargetProvider{prov1}, }, - targets: make(map[string][]Target), + targets: make(map[string][]*Target), } go targetManager.Run() defer targetManager.Stop() diff --git a/web/status.go b/web/status.go index 6840505b9..f536f5561 100644 --- a/web/status.go +++ b/web/status.go @@ -32,18 +32,18 @@ type PrometheusStatusHandler struct { Flags map[string]string RuleManager *rules.Manager - TargetPools func() map[string][]retrieval.Target + TargetPools func() map[string][]*retrieval.Target Birth time.Time PathPrefix string } -// TargetStateToClass returns a map of TargetState to the name of a Bootstrap CSS class. -func (h *PrometheusStatusHandler) TargetStateToClass() map[retrieval.TargetState]string { - return map[retrieval.TargetState]string{ - retrieval.Unknown: "warning", - retrieval.Healthy: "success", - retrieval.Unhealthy: "danger", +// TargetHealthToClass returns a map of TargetHealth to the name of a Bootstrap CSS class. +func (h *PrometheusStatusHandler) TargetHealthToClass() map[retrieval.TargetHealth]string { + return map[retrieval.TargetHealth]string{ + retrieval.HealthUnknown: "warning", + retrieval.HealthBad: "success", + retrieval.HealthGood: "danger", } } diff --git a/web/templates/status.html b/web/templates/status.html index ba37f458b..c9609e786 100644 --- a/web/templates/status.html +++ b/web/templates/status.html @@ -32,7 +32,6 @@

Targets

-        {{$stateToClass := .TargetStateToClass}}
         {{range $job, $pool := call .TargetPools}}
@@ -51,7 +50,7 @@
 
         {{.URL}}
 
diff --git a/web/web.go b/web/web.go
index 1548046c3..ff0917b2d 100644
--- a/web/web.go
+++ b/web/web.go
@@ -193,7 +193,7 @@ func getTemplate(name string, pathPrefix string) (*template.Template, error) {
 
 	file, err = getTemplateFile(name)
 	if err != nil {
-		glog.Error("Could not read template %d: ", name, err)
+		glog.Errorf("Could not read template %s: %s", name, err)
 		return nil, err
 	}
 	t, err = t.Parse(file)
From 8de50619f170203e21f1ed3d5bd9ba6e80fefaee Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Tue, 19 May 2015 11:30:42 +0200
Subject: [PATCH 19/27] Increase target test wait times

On slow systems such as Travis CI, the tests occasionally fail because
the wait times are too short.
---
 retrieval/target_test.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/retrieval/target_test.go b/retrieval/target_test.go
index 6a411f86a..9a5ad481d 100644
--- a/retrieval/target_test.go
+++ b/retrieval/target_test.go
@@ -135,7 +135,7 @@ func TestTargetScrapeTimeout(t *testing.T) {
 	)
 	defer server.Close()
 
-	testTarget := newTestTarget(server.URL, 10*time.Millisecond, clientmodel.LabelSet{})
+	testTarget := newTestTarget(server.URL, 25*time.Millisecond, clientmodel.LabelSet{})
 
 	appender := nopAppender{}
 
@@ -146,7 +146,7 @@
 	}
 
 	// let the deadline lapse
-	time.Sleep(15 * time.Millisecond)
+	time.Sleep(30 * time.Millisecond)
 
 	// now scrape again
 	signal <- true
@@ -194,17 +194,17 @@ func TestTargetRunScraperScrapes(t *testing.T) {
 
 	go testTarget.RunScraper(nopAppender{})
 
 	// Enough time for a scrape to happen.
-	time.Sleep(2 * time.Millisecond)
+	time.Sleep(10 * time.Millisecond)
 	if testTarget.status.LastScrape().IsZero() {
 		t.Errorf("Scrape hasn't occurred.")
 	}
 
 	testTarget.StopScraper()
 	// Wait for it to take effect.
-	time.Sleep(2 * time.Millisecond)
+	time.Sleep(5 * time.Millisecond)
 	last := testTarget.status.LastScrape()
 	// Enough time for a scrape to happen.
-	time.Sleep(2 * time.Millisecond)
+	time.Sleep(10 * time.Millisecond)
 	if testTarget.status.LastScrape() != last {
 		t.Errorf("Scrape occurred after it was stopped.")
 	}
From d8440d75f13a09109735d6f69320bf77dc87ef6a Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Mon, 18 May 2015 19:26:28 +0200
Subject: [PATCH 20/27] Do not start storage processing before Start() is
 called.
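As a sketch of the lifecycle this establishes (mirroring the main.go and
storage.go hunks below): construction becomes side-effect free, and all disk
access and background goroutines begin only in Start.

```go
// Construction no longer touches disk and cannot fail; it only allocates.
memStorage := local.NewMemorySeriesStorage(o)

// Opening the persistence layer and starting the maintenance loops
// happen in Start, e.g. at the beginning of Serve().
if err := p.storage.Start(); err != nil {
	glog.Error("Error opening memory series storage: ", err)
	os.Exit(1)
}
```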
---
 main.go                           | 24 ++++++++++++++----------
 storage/local/interface.go        |  2 +-
 storage/local/persistence.go      |  5 ++++-
 storage/local/persistence_test.go |  1 +
 storage/local/storage.go          | 27 +++++++++++++++++----------
 storage/local/storage_test.go     | 12 ++++++------
 storage/local/test_helpers.go     |  6 ++----
 7 files changed, 45 insertions(+), 32 deletions(-)

diff --git a/main.go b/main.go
index 47d94b46c..99b005547 100644
--- a/main.go
+++ b/main.go
@@ -118,11 +118,7 @@ func NewPrometheus() *prometheus {
 		PedanticChecks: *storagePedanticChecks,
 		SyncStrategy:   syncStrategy,
 	}
-	memStorage, err := local.NewMemorySeriesStorage(o)
-	if err != nil {
-		glog.Error("Error opening memory series storage: ", err)
-		os.Exit(1)
-	}
+	memStorage := local.NewMemorySeriesStorage(o)
 
 	var sampleAppender storage.SampleAppender
 	var remoteStorageQueues []*remote.StorageQueueManager
@@ -213,38 +209,47 @@ func NewPrometheus() *prometheus {
 	}
 	webService.QuitChan = make(chan struct{})
 
-	p.reloadConfig()
+	if !p.reloadConfig() {
+		os.Exit(1)
+	}
 
 	return p
 }
 
-func (p *prometheus) reloadConfig() {
+func (p *prometheus) reloadConfig() bool {
 	glog.Infof("Loading configuration file %s", *configFile)
 
 	conf, err := config.LoadFromFile(*configFile)
 	if err != nil {
 		glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err)
 		glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.")
-		return
+		return false
 	}
 
 	p.webService.StatusHandler.ApplyConfig(conf)
 	p.targetManager.ApplyConfig(conf)
 	p.ruleManager.ApplyConfig(conf)
+
+	return true
 }
 
 // Serve starts the Prometheus server. It returns after the server has been shut
 // down. The method installs an interrupt handler, allowing to trigger a
 // shutdown by sending SIGTERM to the process.
 func (p *prometheus) Serve() {
+	if err := p.storage.Start(); err != nil {
+		glog.Error("Error opening memory series storage: ", err)
+		os.Exit(1)
+	}
 	for _, q := range p.remoteStorageQueues {
 		go q.Run()
 	}
+
 	go p.ruleManager.Run()
 	go p.notificationHandler.Run()
 	go p.targetManager.Run()
 
-	p.storage.Start()
+	registry.MustRegister(p)
 
 	go func() {
 		err := p.webService.ServeForever(*pathPrefix)
@@ -387,6 +392,5 @@ func main() {
 	}
 
 	p := NewPrometheus()
-	registry.MustRegister(p)
 	p.Serve()
 }
diff --git a/storage/local/interface.go b/storage/local/interface.go
index 34ed30c7b..60df2d2f7 100644
--- a/storage/local/interface.go
+++ b/storage/local/interface.go
@@ -48,7 +48,7 @@ type Storage interface {
 	// Run the various maintenance loops in goroutines. Returns when the
 	// storage is ready to use. Keeps everything running in the background
 	// until Stop is called.
-	Start()
+	Start() error
 	// Stop shuts down the Storage gracefully, flushes all pending
 	// operations, stops all maintenance loops, and frees all resources.
 	Stop() error
diff --git a/storage/local/persistence.go b/storage/local/persistence.go
index 7b2a34cec..96d0adf08 100644
--- a/storage/local/persistence.go
+++ b/storage/local/persistence.go
@@ -268,10 +268,13 @@ func newPersistence(basePath string, dirty, pedanticChecks bool, shouldSync sync
 	p.labelPairToFingerprints = labelPairToFingerprints
 	p.labelNameToLabelValues = labelNameToLabelValues
 
-	go p.processIndexingQueue()
 	return p, nil
 }
 
+func (p *persistence) run() {
+	p.processIndexingQueue()
+}
+
 // Describe implements prometheus.Collector.
 func (p *persistence) Describe(ch chan<- *prometheus.Desc) {
 	ch <- p.indexingQueueLength.Desc()
diff --git a/storage/local/persistence_test.go b/storage/local/persistence_test.go
index 20bfe6216..00e36b792 100644
--- a/storage/local/persistence_test.go
+++ b/storage/local/persistence_test.go
@@ -42,6 +42,7 @@ func newTestPersistence(t *testing.T, encoding chunkEncoding) (*persistence, tes
 		dir.Close()
 		t.Fatal(err)
 	}
+	go p.run()
 	return p, test.NewCallbackCloser(func() {
 		p.close()
 		dir.Close()
diff --git a/storage/local/storage.go b/storage/local/storage.go
index c6db45b2b..56371ca77 100644
--- a/storage/local/storage.go
+++ b/storage/local/storage.go
@@ -82,6 +82,8 @@ type memorySeriesStorage struct {
 	fpLocker   *fingerprintLocker
 	fpToSeries *seriesMap
 
+	options *MemorySeriesStorageOptions
+
 	loopStopping, loopStopped chan struct{}
 	maxMemoryChunks           int
 	dropAfter                 time.Duration
@@ -124,10 +126,12 @@ type MemorySeriesStorageOptions struct {
 
-// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
-// has to be called to start the storage.
-func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
+// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Start still
+// has to be called to start the storage.
+func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) Storage {
 	s := &memorySeriesStorage{
 		fpLocker: newFingerprintLocker(1024),
 
+		options: o,
+
 		loopStopping:    make(chan struct{}),
 		loopStopped:     make(chan struct{}),
 		maxMemoryChunks: o.MemoryChunks,
@@ -185,9 +189,13 @@ func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
 			[]string{seriesLocationLabel},
 		),
 	}
+	return s
+}
 
+// Start implements Storage.
+func (s *memorySeriesStorage) Start() error {
 	var syncStrategy syncStrategy
-	switch o.SyncStrategy {
+	switch s.options.SyncStrategy {
 	case Never:
 		syncStrategy = func() bool { return false }
 	case Always:
@@ -198,33 +206,32 @@ func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
 		panic("unknown sync strategy")
 	}
 
-	p, err := newPersistence(o.PersistenceStoragePath, o.Dirty, o.PedanticChecks, syncStrategy)
+	p, err := newPersistence(s.options.PersistenceStoragePath, s.options.Dirty, s.options.PedanticChecks, syncStrategy)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	s.persistence = p
 
 	glog.Info("Loading series map and head chunks...")
 	s.fpToSeries, s.numChunksToPersist, err = p.loadSeriesMapAndHeads()
 	if err != nil {
-		return nil, err
+		return err
 	}
 	glog.Infof("%d series loaded.", s.fpToSeries.length())
 	s.numSeries.Set(float64(s.fpToSeries.length()))
 
 	mapper, err := newFPMapper(s.fpToSeries, p)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	s.mapper = mapper
 
-	return s, nil
-}
+	go s.persistence.run()
 
-// Start implements Storage.
-func (s *memorySeriesStorage) Start() {
 	go s.handleEvictList()
 	go s.loop()
+
+	return nil
 }
 
 // Stop implements Storage.
diff --git a/storage/local/storage_test.go b/storage/local/storage_test.go
index 8ae4d3507..16ec17b01 100644
--- a/storage/local/storage_test.go
+++ b/storage/local/storage_test.go
@@ -163,9 +163,8 @@ func TestLoop(t *testing.T) {
 		CheckpointInterval: 250 * time.Millisecond,
 		SyncStrategy:       Adaptive,
 	}
-	storage, err := NewMemorySeriesStorage(o)
-	if err != nil {
-		t.Fatalf("Error creating storage: %s", err)
+	storage := NewMemorySeriesStorage(o)
+	if err := storage.Start(); err != nil {
+		t.Fatalf("Error starting storage: %s", err)
 	}
-	storage.Start()
 	for _, s := range samples {
@@ -731,9 +731,8 @@ func benchmarkFuzz(b *testing.B, encoding chunkEncoding) {
 		CheckpointInterval: time.Second,
 		SyncStrategy:       Adaptive,
 	}
-	s, err := NewMemorySeriesStorage(o)
-	if err != nil {
-		b.Fatalf("Error creating storage: %s", err)
+	s := NewMemorySeriesStorage(o)
+	if err := s.Start(); err != nil {
+		b.Fatalf("Error starting storage: %s", err)
 	}
-	s.Start()
 	defer s.Stop()
diff --git a/storage/local/test_helpers.go b/storage/local/test_helpers.go
index a645bd1f5..21f6aa008 100644
--- a/storage/local/test_helpers.go
+++ b/storage/local/test_helpers.go
@@ -48,15 +48,13 @@ func NewTestStorage(t test.T, encoding chunkEncoding) (*memorySeriesStorage, tes
 		CheckpointInterval: time.Hour,
 		SyncStrategy:       Adaptive,
 	}
-	storage, err := NewMemorySeriesStorage(o)
-	if err != nil {
+	storage := NewMemorySeriesStorage(o)
+	if err := storage.Start(); err != nil {
 		directory.Close()
-		t.Fatalf("Error creating storage: %s", err)
+		t.Fatalf("Error starting storage: %s", err)
 	}
 
-	storage.Start()
-
 	closer := &testStorageCloser{
 		storage:   storage,
 		directory: directory,
From 5d3024fd3ed97747d477fca23e7e77f7509f001b Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Tue, 19 May 2015 14:38:50 +0200
Subject: [PATCH 21/27] Restructure component initialization

---
 main.go    | 35 +++++++++++++++--------------------
 web/web.go | 13 ++++++++++---
 2 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/main.go b/main.go
index 99b005547..8d596756a 100644
--- a/main.go
+++ b/main.go
@@ -237,27 +237,35 @@ func (p *prometheus) reloadConfig() bool {
 // down. The method installs an interrupt handler, allowing to trigger a
 // shutdown by sending SIGTERM to the process.
 func (p *prometheus) Serve() {
+	// Start all components.
 	if err := p.storage.Start(); err != nil {
 		glog.Error("Error opening memory series storage: ", err)
 		os.Exit(1)
 	}
+	defer p.storage.Stop()
+
+	// The storage has to be fully initialized before registering Prometheus.
+	registry.MustRegister(p)
+
 	for _, q := range p.remoteStorageQueues {
 		go q.Run()
+		defer q.Stop()
 	}
 
 	go p.ruleManager.Run()
+	defer p.ruleManager.Stop()
+
 	go p.notificationHandler.Run()
+	defer p.notificationHandler.Stop()
+
 	go p.targetManager.Run()
+	defer p.targetManager.Stop()
 
-	registry.MustRegister(p)
+	defer p.queryEngine.Stop()
 
-	go func() {
-		err := p.webService.ServeForever(*pathPrefix)
-		if err != nil {
-			glog.Fatal(err)
-		}
-	}()
+	go p.webService.ServeForever(*pathPrefix)
 
+	// Wait for reload or termination signals.
hup := make(chan os.Signal) signal.Notify(hup, syscall.SIGHUP) go func() { @@ -277,19 +285,6 @@ func (p *prometheus) Serve() { close(hup) - p.targetManager.Stop() - p.ruleManager.Stop() - p.queryEngine.Stop() - - if err := p.storage.Stop(); err != nil { - glog.Error("Error stopping local storage: ", err) - } - - for _, q := range p.remoteStorageQueues { - q.Stop() - } - - p.notificationHandler.Stop() glog.Info("See you next time!") } diff --git a/web/web.go b/web/web.go index bd53b0de5..1857fe6ff 100644 --- a/web/web.go +++ b/web/web.go @@ -54,7 +54,7 @@ type WebService struct { } // ServeForever serves the HTTP endpoints and only returns upon errors. -func (ws WebService) ServeForever(pathPrefix string) error { +func (ws WebService) ServeForever(pathPrefix string) { http.Handle("/favicon.ico", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "", 404) @@ -104,9 +104,16 @@ func (ws WebService) ServeForever(pathPrefix string) error { })) } - glog.Info("listening on ", *listenAddress) + glog.Infof("Listening on %s", *listenAddress) - return http.ListenAndServe(*listenAddress, nil) + // If we cannot bind to a port, retry after 30 seconds. + for { + err := http.ListenAndServe(*listenAddress, nil) + if err != nil { + glog.Errorf("Could not listen on %s: %s", *listenAddress, err) + } + time.Sleep(30 * time.Second) + } } func (ws WebService) quitHandler(w http.ResponseWriter, r *http.Request) { From ec9c51ab98d882fb642d7477594ade59dcb1d2b2 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Tue, 19 May 2015 15:38:51 +0200 Subject: [PATCH 22/27] Fix old template naming --- web/status.go | 4 ++-- web/templates/status.html | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/web/status.go b/web/status.go index f536f5561..6f92c51c4 100644 --- a/web/status.go +++ b/web/status.go @@ -42,8 +42,8 @@ type PrometheusStatusHandler struct { func (h *PrometheusStatusHandler) TargetHealthToClass() map[retrieval.TargetHealth]string { return map[retrieval.TargetHealth]string{ retrieval.HealthUnknown: "warning", - retrieval.HealthBad: "success", - retrieval.HealthGood: "danger", + retrieval.HealthGood: "success", + retrieval.HealthBad: "danger", } } diff --git a/web/templates/status.html b/web/templates/status.html index c9609e786..3ee4b0ef7 100644 --- a/web/templates/status.html +++ b/web/templates/status.html @@ -32,6 +32,7 @@

Targets

{{$job}}
- + {{.Status.State}}
+ {{$healthToClass := .TargetHealthToClass}} {{range $job, $pool := call .TargetPools}} @@ -50,8 +51,8 @@ {{.URL}}
{{$job}}
-          {{.Status.State}}
+          {{.Status.Health}}
 
From ff832d2e0306d521712975814cc693580ad70dd0 Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Tue, 19 May 2015 15:24:26 +0200
Subject: [PATCH 23/27] Attach __meta_filepath label to file SD targets.

---
 retrieval/discovery/file.go | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/retrieval/discovery/file.go b/retrieval/discovery/file.go
index 7e16b5f66..a439d6874 100644
--- a/retrieval/discovery/file.go
+++ b/retrieval/discovery/file.go
@@ -26,8 +26,14 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/config"
+
+	clientmodel "github.com/prometheus/client_golang/model"
 )
 
+// FileSDFilepathLabel is the label attached to every target of a file SD
+// target group, containing the path of the file the group was read from.
+const FileSDFilepathLabel = clientmodel.MetaLabelPrefix + "filepath"
+
 // FileDiscovery provides service discovery functionality based
 // on files that contain target groups in JSON or YAML format. Refreshing
 // happens using file watches and periodic refreshes.
@@ -196,7 +202,7 @@ func fileSource(filename string, i int) string {
 
 // Stop implements the TargetProvider interface.
 func (fd *FileDiscovery) Stop() {
-	glog.V(1).Info("Stopping file discovery for %s...", fd.paths)
+	glog.V(1).Infof("Stopping file discovery for %s...", fd.paths)
 
 	fd.done <- struct{}{}
 	// Closing the watcher will deadlock unless all events and errors are drained.
@@ -215,7 +221,7 @@
 
 	fd.done <- struct{}{}
 
-	glog.V(1).Info("File discovery for %s stopped.", fd.paths)
+	glog.V(1).Infof("File discovery for %s stopped.", fd.paths)
 }
 
 // readFile reads a JSON or YAML list of target groups from the file, depending on its
@@ -243,6 +249,10 @@
 
 	for i, tg := range targetGroups {
 		tg.Source = fileSource(filename, i)
+		if tg.Labels == nil {
+			tg.Labels = clientmodel.LabelSet{}
+		}
+		tg.Labels[FileSDFilepathLabel] = clientmodel.LabelValue(filename)
 	}
 	return targetGroups, nil
 }
From a703241bf8fb792f2bfe893b6675264538ecf704 Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Tue, 19 May 2015 18:12:58 +0200
Subject: [PATCH 24/27] Adjust example config to naming changes

---
 documentation/examples/prometheus.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/documentation/examples/prometheus.yml b/documentation/examples/prometheus.yml
index 075645395..50857a991 100644
--- a/documentation/examples/prometheus.yml
+++ b/documentation/examples/prometheus.yml
@@ -1,12 +1,12 @@
 # my global config
-global_config:
+global:
   scrape_interval:     15s # By default, scrape targets every 15 seconds.
   evaluation_interval: 15s # By default, scrape targets every 15 seconds.
   # scrape_timeout is set to the global default (10s).
 
   # Attach these extra labels to all timeseries collected by this Prometheus instance.
   labels:
-    monitor: codelab-monitor
+    monitor: 'codelab-monitor'
 
 # Load and evaluate rules in this file every 'evaluation_interval' seconds.
 rule_files:
@@ -17,7 +17,7 @@ rule_files:
 # Here it's Prometheus itself.
 scrape_configs:
   # The job name is added as a label `job=` to any timeseries scraped from this config.
-  - job_name: prometheus
+  - job_name: 'prometheus'
 
     # Override the global default and scrape targets from this job every 5 seconds.
     scrape_interval: 5s
From ab4e3ee594e0361c51c2b17f0d0c38ad142be4af Mon Sep 17 00:00:00 2001
From: Fabian Reinartz 
Date: Wed, 13 May 2015 23:43:18 +0200
Subject: [PATCH 25/27] Add consul api package to godeps.
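Presumably in preparation for a Consul-based service discovery provider.
Schematically, the vendored client is used like this (hypothetical sketch
based on the api package added below; the import alias and error handling
are illustrative only):

```go
import consulapi "github.com/hashicorp/consul/api"

// Create a client against the local Consul agent and list the
// services known to its catalog.
client, err := consulapi.NewClient(consulapi.DefaultConfig())
if err != nil {
	glog.Errorf("Error creating Consul client: %s", err)
	return
}
services, _, err := client.Catalog().Services(nil)
```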
From ab4e3ee594e0361c51c2b17f0d0c38ad142be4af Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Wed, 13 May 2015 23:43:18 +0200
Subject: [PATCH 25/27] Add consul api package to godeps.

---
 Godeps/Godeps.json                                 |   4 +
 .../github.com/hashicorp/consul/api/README.md      |  39 ++
 .../github.com/hashicorp/consul/api/acl.go         | 140 +++++
 .../hashicorp/consul/api/acl_test.go               | 152 +++++
 .../github.com/hashicorp/consul/api/agent.go       | 334 +++++++++++
 .../hashicorp/consul/api/agent_test.go             | 524 ++++++++++++++++++
 .../github.com/hashicorp/consul/api/api.go         | 442 +++++++++++++++
 .../hashicorp/consul/api/api_test.go               | 242 ++++++++
 .../hashicorp/consul/api/catalog.go                | 182 ++++++
 .../hashicorp/consul/api/catalog_test.go           | 279 ++++++++++
 .../github.com/hashicorp/consul/api/event.go       | 104 ++++
 .../hashicorp/consul/api/event_test.go             |  49 ++
 .../github.com/hashicorp/consul/api/health.go      | 136 +++++
 .../hashicorp/consul/api/health_test.go            | 125 +++++
 .../src/github.com/hashicorp/consul/api/kv.go      | 236 ++++++++
 .../hashicorp/consul/api/kv_test.go                | 439 +++++++++++++++
 .../github.com/hashicorp/consul/api/lock.go        | 326 +++++++++++
 .../hashicorp/consul/api/lock_test.go              | 363 ++++++++++++
 .../github.com/hashicorp/consul/api/raw.go         |  24 +
 .../hashicorp/consul/api/semaphore.go              | 482 ++++++++++++++++
 .../hashicorp/consul/api/semaphore_test.go         | 313 +++++++++++
 .../hashicorp/consul/api/session.go                | 201 +++++++
 .../hashicorp/consul/api/session_test.go           | 205 +++++++
 .../github.com/hashicorp/consul/api/status.go      |  43 ++
 .../hashicorp/consul/api/status_test.go            |  37 ++
 25 files changed, 5421 insertions(+)
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
 create mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index e4cf26b4f..d662427b0 100644
--- a/Godeps/Godeps.json
+++ 
b/Godeps/Godeps.json @@ -19,6 +19,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "655cdfa588ea190e901bc5590e65d5621688847c" }, + { + "ImportPath": "github.com/hashicorp/consul/api", + "Rev": "9fb235a98d8e88f7857b21bb2dd3efc428c01427", + }, { "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md new file mode 100644 index 000000000..bce2ebb51 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,39 @@ +Consul API client +================= + +This package provides the `api` package which attempts to +provide programmatic access to the full Consul API. + +Currently, all of the Consul APIs included in version 0.3 are supported. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api) + +Usage +===== + +Below is an example of using the Consul client: + +```go +// Get a new client, with KV endpoints +client, _ := api.NewClient(api.DefaultConfig()) +kv := client.KV() + +// PUT a new KV pair +p := &api.KVPair{Key: "foo", Value: []byte("test")} +_, err := kv.Put(p, nil) +if err != nil { + panic(err) +} + +// Lookup the pair +pair, _, err := kv.Get("foo", nil) +if err != nil { + panic(err) +} +fmt.Printf("KV: %v", pair) + +``` + diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 000000000..c3fb0d53a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,140 @@ +package api + +const ( + // ACLCLientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +// ACLEntry is used to represent an ACL entry +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := 
&WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go new file mode 100644 index 000000000..b896a1895 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go @@ -0,0 +1,152 @@ +package api + +import ( + "os" + "testing" +) + +// ROOT is a management token for the tests +var CONSUL_ROOT string + +func init() { + CONSUL_ROOT = os.Getenv("CONSUL_ROOT") +} + +func TestACL_CreateDestroy(t *testing.T) { + t.Parallel() + if CONSUL_ROOT == "" { + t.SkipNow() + } + c, s := makeClient(t) + defer s.Stop() + + c.config.Token = CONSUL_ROOT + acl := c.ACL() + + ae := ACLEntry{ + Name: "API test", + Type: ACLClientType, + Rules: `key "" { policy = "deny" }`, + } + + id, wm, err := acl.Create(&ae, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + ae2, _, err := acl.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules { + t.Fatalf("Bad: %#v", ae2) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func TestACL_CloneDestroy(t *testing.T) { + t.Parallel() + if CONSUL_ROOT == "" { + t.SkipNow() + } + c, s := makeClient(t) + defer s.Stop() + + c.config.Token = CONSUL_ROOT + acl := c.ACL() + + id, wm, err := acl.Clone(CONSUL_ROOT, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func 
TestACL_Info(t *testing.T) { + t.Parallel() + if CONSUL_ROOT == "" { + t.SkipNow() + } + c, s := makeClient(t) + defer s.Stop() + + c.config.Token = CONSUL_ROOT + acl := c.ACL() + + ae, qm, err := acl.Info(CONSUL_ROOT, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + + if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType { + t.Fatalf("bad: %#v", ae) + } +} + +func TestACL_List(t *testing.T) { + t.Parallel() + if CONSUL_ROOT == "" { + t.SkipNow() + } + c, s := makeClient(t) + defer s.Stop() + + c.config.Token = CONSUL_ROOT + acl := c.ACL() + + acls, qm, err := acl.List(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(acls) < 2 { + t.Fatalf("bad: %v", acls) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 000000000..e56a18dcd --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,334 @@ +package api + +import ( + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to create an associated +// check for a service +type AgentServiceCheck struct { + Script string `json:",omitempty"` + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Status string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != 
nil { + return nil, err + } + return out, nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state +func (a *Agent) PassTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state +func (a *Agent) WarnTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state +func (a *Agent) FailTTL(checkID, note string) error { + return a.UpdateTTL(checkID, note, "fail") +} + +// UpdateTTL is used to update the TTL of a check +func (a *Agent) UpdateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check 
+ _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go new file mode 100644 index 000000000..358c12a6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go @@ -0,0 +1,524 @@ +package api + +import ( + "strings" + "testing" +) + +func TestAgent_Self(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + name := info["Config"]["NodeName"] + if name == "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAgent_Members(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + members, err := agent.Members(false) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(members) != 1 { + t.Fatalf("bad: %v", members) + } +} + +func TestAgent_Services(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + // Checks should default to critical + if chk.Status != "critical" { + t.Fatalf("Bad: %#v", chk) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_CheckPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + Status: "passing", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + if chk.Status != "passing" { + t.Fatalf("Bad: %#v", chk) + } + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_CheckBadStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + Status: "fluffy", + }, + } + if err := agent.ServiceRegister(reg); err == nil { + t.Fatalf("bad status accepted") + } +} + +func TestAgent_ServiceAddress(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg1 := &AgentServiceRegistration{ + Name: "foo1", + 
Port: 8000, + Address: "192.168.0.42", + } + reg2 := &AgentServiceRegistration{ + Name: "foo2", + Port: 8000, + } + if err := agent.ServiceRegister(reg1); err != nil { + t.Fatalf("err: %v", err) + } + if err := agent.ServiceRegister(reg2); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + + if _, ok := services["foo1"]; !ok { + t.Fatalf("missing service: %v", services) + } + if _, ok := services["foo2"]; !ok { + t.Fatalf("missing service: %v", services) + } + + if services["foo1"].Address != "192.168.0.42" { + t.Fatalf("missing Address field in service foo1: %v", services) + } + if services["foo2"].Address != "" { + t.Fatalf("missing Address field in service foo2: %v", services) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Services_MultipleChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Checks: AgentServiceChecks{ + &AgentServiceCheck{ + TTL: "15s", + }, + &AgentServiceCheck{ + TTL: "30s", + }, + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := checks["service:foo:1"]; !ok { + t.Fatalf("missing check: %v", checks) + } + if _, ok := checks["service:foo:2"]; !ok { + t.Fatalf("missing check: %v", checks) + } +} + +func TestAgent_SetTTLStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + if err := agent.WarnTTL("service:foo", "test"); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "warning" { + t.Fatalf("Bad: %#v", chk) + } + if chk.Output != "test" { + t.Fatalf("Bad: %#v", chk) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Checks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "critical" { + t.Fatalf("check not critical: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_CheckStartPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + AgentServiceCheck: AgentServiceCheck{ + Status: "passing", + }, + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != 
nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != "passing" { + t.Fatalf("check not passing: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_Checks_serviceBound(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Register a check bound to the service + reg := &AgentCheckRegistration{ + Name: "redischeck", + ServiceID: "redis", + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + + check, ok := checks["redischeck"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if check.ServiceID != "redis" { + t.Fatalf("missing service association for check: %v", check) + } +} + +func TestAgent_Join(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Join ourself + addr := info["Config"]["AdvertiseAddr"].(string) + err = agent.Join(addr, false) + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAgent_ForceLeave(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Eject somebody + err := agent.ForceLeave("foo") + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestServiceMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Enable maintenance mode + if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != "critical" || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if !found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableServiceMaintenance("redis"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the critical health check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} + +func TestNodeMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Enable maintenance mode + if err := agent.EnableNodeMaintenance("broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != "critical" || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if 
!found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableNodeMaintenance(); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go new file mode 100644 index 000000000..8fe2ead04 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,442 @@ +package api + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overriden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. 
+ WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string +} + +// DefaultConfig returns a default configuration for the client +func DefaultConfig() *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + HttpClient: http.DefaultClient, + } + + if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { + config.Address = addr + } + + if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { + config.Token = token + } + + if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) + } + + if enabled { + config.Scheme = "https" + } + } + + if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { + doVerify, err := strconv.ParseBool(verify) + if err != nil { + log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) + } + + if !doVerify { + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + } + } + + return config +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 { + config.HttpClient = &http.Client{ + Transport: &http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + }, + }, + } + config.Address = parts[1] + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// durToMsec converts a duration to a millisecond specified string +func durToMsec(dur time.Duration) string { + return fmt.Sprintf("%dms", dur/time.Millisecond) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.params.Set("token", q.Token) + } +} + +// toHTTP converts the request to an HTTP 
request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.params.Set("token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Now().Sub(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. 
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go new file mode 100644 index 000000000..283ebe358 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go @@ -0,0 +1,242 @@ +package api + +import ( + crand "crypto/rand" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/hashicorp/consul/testutil" +) + +type configCallback func(c *Config) + +func makeClient(t *testing.T) (*Client, *testutil.TestServer) { + return makeClientWithConfig(t, nil, nil) +} + +func makeClientWithConfig( + t *testing.T, + cb1 configCallback, + cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) { + + // Make client config + conf := DefaultConfig() + if cb1 != nil { + cb1(conf) + } + + // Create server + server := testutil.NewTestServerConfig(t, cb2) + conf.Address = server.HTTPAddr + + // Create client + client, err := NewClient(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + return client, server +} + +func testKey() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("Failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func TestDefaultConfig_env(t *testing.T) { 
+ t.Parallel() + addr := "1.2.3.4:5678" + token := "abcd1234" + auth := "username:password" + + os.Setenv("CONSUL_HTTP_ADDR", addr) + defer os.Setenv("CONSUL_HTTP_ADDR", "") + os.Setenv("CONSUL_HTTP_TOKEN", token) + defer os.Setenv("CONSUL_HTTP_TOKEN", "") + os.Setenv("CONSUL_HTTP_AUTH", auth) + defer os.Setenv("CONSUL_HTTP_AUTH", "") + os.Setenv("CONSUL_HTTP_SSL", "1") + defer os.Setenv("CONSUL_HTTP_SSL", "") + os.Setenv("CONSUL_HTTP_SSL_VERIFY", "0") + defer os.Setenv("CONSUL_HTTP_SSL_VERIFY", "") + + config := DefaultConfig() + + if config.Address != addr { + t.Errorf("expected %q to be %q", config.Address, addr) + } + + if config.Token != token { + t.Errorf("expected %q to be %q", config.Token, token) + } + + if config.HttpAuth == nil { + t.Fatalf("expected HttpAuth to be enabled") + } + if config.HttpAuth.Username != "username" { + t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username") + } + if config.HttpAuth.Password != "password" { + t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password") + } + + if config.Scheme != "https" { + t.Errorf("expected %q to be %q", config.Scheme, "https") + } + + if !config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify { + t.Errorf("expected SSL verification to be off") + } +} + +func TestSetQueryOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", + AllowStale: true, + RequireConsistent: true, + WaitIndex: 1000, + WaitTime: 100 * time.Second, + Token: "12345", + } + r.setQueryOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["stale"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["consistent"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("index") != "1000" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("wait") != "100000ms" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("token") != "12345" { + t.Fatalf("bad: %v", r.params) + } +} + +func TestSetWriteOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &WriteOptions{ + Datacenter: "foo", + Token: "23456", + } + r.setWriteOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("token") != "23456" { + t.Fatalf("bad: %v", r.params) + } +} + +func TestRequestToHTTP(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("DELETE", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", + } + r.setQueryOptions(q) + req, err := r.toHTTP() + if err != nil { + t.Fatalf("err: %v", err) + } + + if req.Method != "DELETE" { + t.Fatalf("bad: %v", req) + } + if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" { + t.Fatalf("bad: %v", req) + } +} + +func TestParseQueryMeta(t *testing.T) { + t.Parallel() + resp := &http.Response{ + Header: make(map[string][]string), + } + resp.Header.Set("X-Consul-Index", "12345") + resp.Header.Set("X-Consul-LastContact", "80") + resp.Header.Set("X-Consul-KnownLeader", "true") + + qm := &QueryMeta{} + if err := parseQueryMeta(resp, qm); err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex != 12345 { + t.Fatalf("Bad: %v", qm) + } + if qm.LastContact != 80*time.Millisecond { + t.Fatalf("Bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("Bad: %v", qm) + } +} + +func TestAPI_UnixSocket(t *testing.T) { + t.Parallel() + if runtime.GOOS == "windows" { 
+ t.SkipNow() + } + + tempDir, err := ioutil.TempDir("", "consul") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(tempDir) + socket := filepath.Join(tempDir, "test.sock") + + c, s := makeClientWithConfig(t, func(c *Config) { + c.Address = "unix://" + socket + }, func(c *testutil.TestServerConfig) { + c.Addresses = &testutil.TestAddressConfig{ + HTTP: "unix://" + socket, + } + }) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %s", err) + } + if info["Config"]["NodeName"] == "" { + t.Fatalf("bad: %v", info) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 000000000..cf64bd909 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,182 @@ +package api + +type Node struct { + Node string + Address string +} + +type CatalogService struct { + Node string + Address string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + Node string + Address string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + 
rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go new file mode 100644 index 000000000..bb8be25b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go @@ -0,0 +1,279 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestCatalog_Datacenters(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + datacenters, err := catalog.Datacenters() + if err != nil { + return false, err + } + + if len(datacenters) == 0 { + return false, fmt.Errorf("Bad: %v", datacenters) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Nodes(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + nodes, meta, err := catalog.Nodes(nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(nodes) == 0 { + return false, fmt.Errorf("Bad: %v", nodes) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Services(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + services, meta, err := catalog.Services(nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(services) == 0 { + return false, fmt.Errorf("Bad: %v", services) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Service(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + testutil.WaitForResult(func() (bool, error) { + services, meta, err := 
catalog.Service("consul", "", nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + + if len(services) == 0 { + return false, fmt.Errorf("Bad: %v", services) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Node(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + name, _ := c.Agent().NodeName() + + testutil.WaitForResult(func() (bool, error) { + info, meta, err := catalog.Node(name, nil) + if err != nil { + return false, err + } + + if meta.LastIndex == 0 { + return false, fmt.Errorf("Bad: %v", meta) + } + if len(info.Services) == 0 { + return false, fmt.Errorf("Bad: %v", info) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestCatalog_Registration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + service := &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + } + + check := &AgentCheck{ + Node: "foobar", + CheckID: "service:redis1", + Name: "Redis health check", + Notes: "Script based health check", + Status: "passing", + ServiceID: "redis1", + } + + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: service, + Check: check, + } + + testutil.WaitForResult(func() (bool, error) { + if _, err := catalog.Register(reg, nil); err != nil { + return false, err + } + + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if _, ok := node.Services["redis1"]; !ok { + return false, fmt.Errorf("missing service: redis1") + } + + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + return false, err + } + + if health[0].CheckID != "service:redis1" { + return false, fmt.Errorf("missing checkid service:redis1") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test catalog deregistration of the previously registered service + dereg := &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + ServiceID: "redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if _, ok := node.Services["redis1"]; ok { + return false, fmt.Errorf("ServiceID:redis1 is not deregistered") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test deregistration of the previously registered check + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + CheckID: "service:redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + testutil.WaitForResult(func() (bool, error) { + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + return false, err + } + + if len(health) != 0 { + return false, fmt.Errorf("CheckID:service:redis1 is not deregistered") + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) + + // Test node deregistration of the previously registered node + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + 
testutil.WaitForResult(func() (bool, error) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + return false, err + } + + if node != nil { + return false, fmt.Errorf("node is not deregistered: %v", node) + } + + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go new file mode 100644 index 000000000..85b5b069b --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. +func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. The index is not monotonic, nor does it provide provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex. 
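IDToIndex pairs naturally with List's blocking support: convert a known event ID into a wait index and block until something newer arrives. A minimal sketch of that pattern follows; waitForNewerEvents is a hypothetical helper, not part of this patch, and because the event index is quasi-blocking and non-monotonic the return should be treated as a hint rather than a guarantee.

// waitForNewerEvents blocks (server-side) until the agent has seen events
// past the one identified by sinceID, then returns the current buffer.
func waitForNewerEvents(e *Event, sinceID string) ([]*UserEvent, error) {
    opts := &QueryOptions{WaitIndex: e.IDToIndex(sinceID)}
    events, _, err := e.List("", opts) // "" = no name filter
    return events, err
}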
+func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go new file mode 100644 index 000000000..1ca92e233 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go @@ -0,0 +1,49 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestEvent_FireList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + event := c.Event() + + params := &UserEvent{Name: "foo"} + id, meta, err := event.Fire(params, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + var events []*UserEvent + var qm *QueryMeta + testutil.WaitForResult(func() (bool, error) { + events, qm, err = event.List("", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + return len(events) > 0, err + }, func(err error) { + t.Fatalf("err: %#v", err) + }) + + if events[len(events)-1].ID != id { + t.Fatalf("bad: %#v", events) + } + + if qm.LastIndex != event.IDToIndex(id) { + t.Fatalf("Bad: %#v", qm) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go new file mode 100644 index 000000000..02b161e28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,136 @@ +package api + +import ( + "fmt" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks []*HealthCheck +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a service +func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err 
+ } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set("passing", "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retreive all the checks in a given state. +// The wildcard "any" state can also be used for all checks. +func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { + switch state { + case "any": + case "warning": + case "critical": + case "passing": + case "unknown": + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*HealthCheck + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go new file mode 100644 index 000000000..d80a4693a --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go @@ -0,0 +1,125 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestHealth_Node(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + name := info["Config"]["NodeName"].(string) + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.Node(name, nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_Checks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + // Make a service with a check + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + defer agent.ServiceDeregister("foo") + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.Checks("foo", nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_Service(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() 
+ + health := c.Health() + + testutil.WaitForResult(func() (bool, error) { + // consul service should always exist... + checks, meta, err := health.Service("consul", "", true, nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} + +func TestHealth_State(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + health := c.Health() + + testutil.WaitForResult(func() (bool, error) { + checks, meta, err := health.State("any", nil) + if err != nil { + return false, err + } + if meta.LastIndex == 0 { + return false, fmt.Errorf("bad: %v", meta) + } + if len(checks) == 0 { + return false, fmt.Errorf("Bad: %v", checks) + } + return true, nil + }, func(err error) { + t.Fatalf("err: %s", err) + }) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 000000000..ba74057fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,236 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + Key string + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Value []byte + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. 
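To make the separator behavior concrete, the following sketch (hypothetical helper and invented key names, not part of the vendored code) writes three keys and then lists only the first level under the prefix:

// listTopLevel returns the immediate children of "app/", collapsing nested
// keys to their common prefix, e.g. ["app/config/", "app/other"].
func listTopLevel(kv *KV) ([]string, error) {
    for _, key := range []string{"app/config/a", "app/config/b", "app/other"} {
        p := &KVPair{Key: key, Value: []byte("v")}
        if _, err := kv.Put(p, nil); err != nil {
            return nil, err
        }
    }
    keys, _, err := kv.Keys("app/", "/", nil)
    return keys, err
}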
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+ params := map[string]string{"keys": ""}
+ if separator != "" {
+ params["separator"] = separator
+ }
+ resp, qm, err := k.getInternal(prefix, params, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []string
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+ r := k.c.newRequest("GET", "/v1/kv/"+key)
+ r.setQueryOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := k.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == 404 {
+ resp.Body.Close()
+ return nil, qm, nil
+ } else if resp.StatusCode != 200 {
+ resp.Body.Close()
+ return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+ }
+ return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+ params := make(map[string]string, 1)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ _, wm, err := k.put(p.Key, params, p.Value, q)
+ return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failure.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["acquire"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
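Acquire and Release are the raw primitives beneath the higher-level Lock type vendored later in this patch. A reduced sketch of using them directly, assuming the caller already holds a valid session ID (holdKey is hypothetical and error handling is trimmed):

// holdKey locks key under the given session, runs work, then releases.
func holdKey(kv *KV, key, session string, work func()) error {
    p := &KVPair{Key: key, Value: []byte("owner"), Session: session}
    ok, _, err := kv.Acquire(p, nil)
    if err != nil {
        return err
    }
    if !ok {
        return fmt.Errorf("key %q is already locked", key)
    }
    defer kv.Release(p, nil) // best-effort unlock
    work()
    return nil
}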
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go new file mode 100644 index 000000000..346d13681 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go @@ -0,0 +1,439 @@ +package api + +import ( + "bytes" + "path" + "testing" + "time" +) + +func TestClientPutGetDelete(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, _, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + + // Put the key + value := []byte("test") + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) 
+ } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete + if _, err := kv.Delete(key, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Get should fail + pair, _, err = kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } +} + +func TestClient_List_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != len(keys) { + t.Fatalf("got %d keys", len(pairs)) + } + for _, pair := range pairs { + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + pairs, _, err = kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("got %d keys", len(pairs)) + } +} + +func TestClient_DeleteCAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + p.ModifyIndex = 1 + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestClient_CAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + newVal := []byte("foo") + p.Value = newVal + p.ModifyIndex = 1 + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestClient_WatchGet(t *testing.T) { + 
t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pair, meta2, err := kv.Get(key, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } +} + +func TestClient_WatchList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + prefix := testKey() + key := path.Join(prefix, testKey()) + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pairs, meta2, err := kv.List(prefix, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 1 { + t.Fatalf("expected value: %#v", pairs) + } + if !bytes.Equal(pairs[0].Value, value) { + t.Fatalf("unexpected value: %#v", pairs) + } + if pairs[0].Flags != 42 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } + +} + +func TestClient_Keys_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + out, meta, err := kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != len(keys) { + t.Fatalf("got %d keys", len(out)) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + out, _, err = kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != 0 { + t.Fatalf("got %d keys", len(out)) + } +} + +func TestClient_AcquireRelease(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + kv := c.KV() + + // Make a session + id, _, err := session.CreateNoChecks(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer 
session.Destroy(id, nil)
+
+ // Acquire the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value, Session: id}
+ if work, _, err := kv.Acquire(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Lock failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != id {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Release
+ if work, _, err := kv.Release(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Release fail")
+ }
+
+ // Get should work
+ pair, meta, err = kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != "" {
+ t.Fatalf("Expected unlock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 000000000..4b694789c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,326 @@
+package api
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultLockSessionName is the Session Name we assign if none is provided
+ DefaultLockSessionName = "Consul API Lock"
+
+ // DefaultLockSessionTTL is the default session TTL if no Session is provided
+ // when creating a new Lock. This is used because we do not have any
+ // other check to depend upon.
+ DefaultLockSessionTTL = "15s"
+
+ // DefaultLockWaitTime is how long we block for at a time to check if lock
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Lock acquisition.
+ DefaultLockWaitTime = 15 * time.Second
+
+ // DefaultLockRetryTime is how long we wait after a failed lock acquisition
+ // before attempting the acquisition again. This is so that once a lock-delay
+ // is in effect, we do not hot loop retrying the acquisition.
+ DefaultLockRetryTime = 5 * time.Second
+
+ // LockFlagValue is a magic flag we set to indicate a key
+ // is being used for a lock. It is used to detect a potential
+ // conflict with a semaphore.
+ LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+ // ErrLockHeld is returned if we attempt to double lock
+ ErrLockHeld = fmt.Errorf("Lock already held")
+
+ // ErrLockNotHeld is returned if we attempt to unlock a lock
+ // that we do not hold.
+ ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+ // ErrLockInUse is returned if we attempt to destroy a lock
+ // that is in use.
+ ErrLockInUse = fmt.Errorf("Lock in use")
+
+ // ErrLockConflict is returned if the flags on a key
+ // used for a lock do not match expectation
+ ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://consul.io/docs/guides/leader-election.html.
+type Lock struct {
+ c *Client
+ opts *LockOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+ Key string // Must be set and have write permissions
+ Value []byte // Optional, value to associate with the lock
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultLockSessionName
+ SessionTTL string // Optional, defaults to DefaultLockSessionTTL
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+ opts := &LockOptions{
+ Key: key,
+ }
+ return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+ if opts.Key == "" {
+ return nil, fmt.Errorf("missing key")
+ }
+ if opts.SessionName == "" {
+ opts.SessionName = DefaultLockSessionName
+ }
+ if opts.SessionTTL == "" {
+ opts.SessionTTL = DefaultLockSessionTTL
+ } else {
+ if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+ return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+ }
+ }
+ l := &Lock{
+ c: c,
+ opts: opts,
+ }
+ return l, nil
+}
+
+// Lock attempts to acquire the lock, blocking while doing so.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, it returns a channel that is closed if the lock is lost.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety, so an application must be able to handle
+// the lock being lost.
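Putting LockKey and Lock together, a leader-election loop looks roughly like the sketch below. runWhenElected is a hypothetical wrapper, not part of the vendored API; passing a nil stopCh means the acquisition cannot be aborted.

// runWhenElected blocks until the lock is held, runs fn, and steps down
// when fn returns. fn should watch `lost` and stop work if it closes.
func runWhenElected(c *Client, key string, fn func(lost <-chan struct{})) error {
    lock, err := c.LockKey(key)
    if err != nil {
        return err
    }
    lost, err := lock.Lock(nil) // blocks until acquired
    if err != nil {
        return err
    }
    defer lock.Unlock()
    fn(lost)
    return nil
}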
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + if s, err := l.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: DefaultLockWaitTime, + } + +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Look for an existing lock, blocking until not taken + pair, meta, err := kv.Get(l.opts.Key, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read lock: %v", err) + } + if pair != nil && pair.Flags != LockFlagValue { + return nil, ErrLockConflict + } + locked := false + if pair != nil && pair.Session == l.lockSession { + goto HELD + } + if pair != nil && pair.Session != "" { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Try to acquire the lock + pair = l.lockEntry(l.lockSession) + locked, _, err = kv.Acquire(pair, nil) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock: %v", err) + } + + // Handle the case of not getting the lock + if !locked { + select { + case <-time.After(DefaultLockRetryTime): + goto WAIT + case <-stopCh: + return nil, nil + } + } + +HELD: + // Watch to ensure we maintain leadership + leaderCh := make(chan struct{}) + go l.monitorLock(l.lockSession, leaderCh) + + // Set that we own the lock + l.isHeld = true + + // Locked! All done + return leaderCh, nil +} + +// Unlock released the lock. It is an error to call this +// if the lock is not currently held. +func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. It is not necessary +// to invoke. It will fail if the lock is in use. 
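Because Destroy refuses to remove a lock that is held or contended, a conservative cleanup path simply treats ErrLockInUse as success, as in this hypothetical sketch:

// releaseAndCleanup steps down and then tries to garbage-collect the
// lock entry, tolerating contention from other clients.
func releaseAndCleanup(lock *Lock) error {
    if err := lock.Unlock(); err != nil && err != ErrLockNotHeld {
        return err
    }
    if err := lock.Destroy(); err != nil && err != ErrLockInUse {
        return err
    }
    return nil
}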
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go new file mode 100644 index 000000000..0a8fa5172 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go @@ -0,0 +1,363 @@ +package api + +import ( + "log" + "sync" + "testing" + "time" +) + +func TestLock_LockUnlock(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Initial unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Double lock should fail + _, err = lock.Lock(nil) + if err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be leader + select { + case <-leaderCh: + t.Fatalf("should be leader") + default: + } + + // Initial unlock should work + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_ForceInvalidate(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + 
t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the session, simulator an operator invalidation + // or a health check failure + session := c.Session() + session.Destroy(lock.lockSession, nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_DeleteKey(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the key, simulate an operator intervention + kv := c.KV() + kv.Delete("test/lock", nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestLock_Contend(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + wg := &sync.WaitGroup{} + acquired := make([]bool, 3) + for idx := range acquired { + wg.Add(1) + go func(idx int) { + defer wg.Done() + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work eventually, will contend + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestLock_Destroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should fail + if err := lock.Destroy(); err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be able to release + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Acquire with a different lock + l2, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err = l2.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should still fail + if err := lock.Destroy(); err != ErrLockInUse { + t.Fatalf("err: %v", err) + } + + // Should relese + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + err = lock.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double destroy should work + err = l2.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestLock_Conflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/lock/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not 
hold") + } + defer sema.Release() + + lock, err := c.LockKey("test/lock/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + _, err = lock.Lock(nil) + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + err = lock.Destroy() + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } +} + +func TestLock_ReclaimLock(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session, _, err := c.Session().Create(&SessionEntry{}, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + reclaimed := make(chan (<-chan struct{}), 1) + go func() { + l2Ch, err := l2.Lock(nil) + if err != nil { + t.Fatalf("not locked: %v", err) + } + reclaimed <- l2Ch + }() + + // Should reclaim the lock + var leader2Ch <-chan struct{} + + select { + case leader2Ch = <-reclaimed: + case <-time.After(time.Second): + t.Fatalf("should have locked") + } + + // unlock should work + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + //Both locks should see the unlock + select { + case <-leader2Ch: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } + + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 000000000..745a208c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,24 @@ +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. +func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go new file mode 100644 index 000000000..957f884a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go @@ -0,0 +1,482 @@ +package api + +import ( + "encoding/json" + "fmt" + "path" + "sync" + "time" +) + +const ( + // DefaultSemaphoreSessionName is the Session Name we assign if none is provided + DefaultSemaphoreSessionName = "Consul API Semaphore" + + // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided + // when creating a new Semaphore. This is used because we do not have another + // other check to depend upon. 
+ DefaultSemaphoreSessionTTL = "15s"
+
+ // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Semaphore acquisition.
+ DefaultSemaphoreWaitTime = 15 * time.Second
+
+ // DefaultSemaphoreRetryTime is how long we wait after a failed slot acquisition
+ // before attempting the acquisition again. This is so that once a lock-delay
+ // is in effect, we do not hot loop retrying the acquisition.
+ DefaultSemaphoreRetryTime = 5 * time.Second
+
+ // DefaultSemaphoreKey is the key used within the prefix to
+ // use for coordination between all the contenders.
+ DefaultSemaphoreKey = ".lock"
+
+ // SemaphoreFlagValue is a magic flag we set to indicate a key
+ // is being used for a semaphore. It is used to detect a potential
+ // conflict with a lock.
+ SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+ // ErrSemaphoreHeld is returned if we attempt to double acquire
+ ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+ // ErrSemaphoreNotHeld is returned if we attempt to release a semaphore
+ // that we do not hold.
+ ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+ // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+ // that is in use.
+ ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+ // ErrSemaphoreConflict is returned if the flags on a key
+ // used for a semaphore do not match expectation.
+ ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+ c *Client
+ opts *SemaphoreOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore.
+type SemaphoreOptions struct {
+ Prefix string // Must be set and have write permissions
+ Limit int // Must be set, and be positive
+ Value []byte // Optional, value to associate with the contender entry
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultSemaphoreSessionName
+ SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+ // Limit is the integer limit of holders. This is used to
+ // verify that all the holders agree on the value.
+ Limit int
+
+ // Holders is a list of all the semaphore holders.
+ // It maps the session ID to true and is effectively used as a set.
+ Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which operates
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+ opts := &SemaphoreOptions{
+ Prefix: prefix,
+ Limit: limit,
+ }
+ return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
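A typical use of the semaphore is to bound concurrency across many processes. The hypothetical wrapper below sketches the acquire/run/release cycle; a nil stopCh means the acquisition cannot be aborted, and production code should also watch the returned channel for slot loss.

// withSlot acquires one of `limit` slots under prefix, runs fn, and
// releases the slot when fn returns.
func withSlot(c *Client, prefix string, limit int, fn func(lost <-chan struct{})) error {
    sema, err := c.SemaphorePrefix(prefix, limit)
    if err != nil {
        return err
    }
    lost, err := sema.Acquire(nil) // blocks until a slot frees up
    if err != nil {
        return err
    }
    defer sema.Release()
    fn(lost)
    return nil
}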
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { + if opts.Prefix == "" { + return nil, fmt.Errorf("missing prefix") + } + if opts.Limit <= 0 { + return nil, fmt.Errorf("semaphore limit must be positive") + } + if opts.SessionName == "" { + opts.SessionName = DefaultSemaphoreSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultSemaphoreSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + s := &Semaphore{ + c: c, + opts: opts, + } + return s, nil +} + +// Acquire attempts to reserve a slot in the semaphore, blocking until +// success, interrupted via the stopCh or an error is encounted. +// Providing a non-nil stopCh can be used to abort the attempt. +// On success, a channel is returned that represents our slot. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the slot is held until Release() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. +func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + if sess, err := s.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: DefaultSemaphoreWaitTime, + } + +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update 
lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. +func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. 
+func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an +// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. 
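monitorLock below is an instance of the blocking-query pattern used throughout this package: read with WaitIndex set to the last observed index, act on the result, and loop. A reduced, hypothetical version of the same loop over a KV prefix (monitorLock additionally sets RequireConsistent to avoid stale reads):

// watchPrefix long-polls the prefix and invokes onChange after every
// index change. It never returns except on error; illustrative only.
func watchPrefix(kv *KV, prefix string, onChange func(KVPairs)) error {
    opts := &QueryOptions{}
    for {
        pairs, meta, err := kv.List(prefix, opts)
        if err != nil {
            return err
        }
        onChange(pairs)
        opts.WaitIndex = meta.LastIndex
    }
}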
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+ defer close(stopCh)
+ kv := s.c.KV()
+ opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+ pairs, meta, err := kv.List(s.opts.Prefix, opts)
+ if err != nil {
+ return
+ }
+ lockPair := s.findLock(pairs)
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return
+ }
+ s.pruneDeadHolders(lock, pairs)
+ if _, ok := lock.Holders[session]; ok {
+ opts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
new file mode 100644
index 000000000..5e5e53588
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
@@ -0,0 +1,313 @@
+package api
+
+import (
+ "log"
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestSemaphore_AcquireRelease(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Initial release should fail
+ err = sema.Release()
+ if err != ErrSemaphoreNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not held")
+ }
+
+ // Double lock should fail
+ _, err = sema.Acquire(nil)
+ if err != ErrSemaphoreHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should be held
+ select {
+ case <-lockCh:
+ t.Fatalf("should be held")
+ default:
+ }
+
+ // First release should work
+ err = sema.Release()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Double unlock should fail
+ err = sema.Release()
+ if err != ErrSemaphoreNotHeld {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should lose resource
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be held")
+ }
+}
+
+func TestSemaphore_ForceInvalidate(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not acquired")
+ }
+ defer sema.Release()
+
+ go func() {
+ // Nuke the session, simulating an operator invalidation
+ // or a health check failure
+ session := c.Session()
+ session.Destroy(sema.lockSession, nil)
+ }()
+
+ // Should lose the slot
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be locked")
+ }
+}
+
+func TestSemaphore_DeleteKey(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ sema, err := c.SemaphorePrefix("test/semaphore", 2)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Should work
+ lockCh, err := sema.Acquire(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if lockCh == nil {
+ t.Fatalf("not locked")
+ }
+ defer sema.Release()
+
+ go func() {
+ // Nuke the key, simulate an operator intervention
+ kv := c.KV()
+ kv.DeleteTree("test/semaphore", nil)
+ }()
+
+ // Should lose the slot
+ select {
+ case <-lockCh:
+ case <-time.After(time.Second):
+ t.Fatalf("should not be locked")
+ }
+}
+
+func TestSemaphore_Contend(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ wg := &sync.WaitGroup{}
+ acquired := make([]bool, 4)
+ for idx := range acquired {
+ wg.Add(1)
+ go func(idx int) {
+ defer wg.Done()
+ sema, err :=
c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work eventually, will contend + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not locked") + } + defer sema.Release() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestSemaphore_BadLimit(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 0) + if err == nil { + t.Fatalf("should error") + } + + sema, err = c.SemaphorePrefix("test/semaphore", 1) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" { + t.Fatalf("err: %v", err) + } +} + +func TestSemaphore_Destroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still held + if err := sema.Destroy(); err != ErrSemaphoreHeld { + t.Fatalf("err: %v", err) + } + + err = sema.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still in use + if err := sema.Destroy(); err != ErrSemaphoreInUse { + t.Fatalf("err: %v", err) + } + + err = sema2.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema2.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestSemaphore_Conflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/sema/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + sema, err := c.SemaphorePrefix("test/sema/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with lock + _, err = sema.Acquire(nil) + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with lock + err = sema.Destroy() + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go new file mode 100644 index 000000000..a99da511d --- /dev/null +++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,201 @@ +package api + +import ( + "fmt" + "time" +) + +const ( + // 
SessionBehaviorRelease is the default behavior and causes
+ // all associated locks to be released on session invalidation.
+ SessionBehaviorRelease = "release"
+
+ // SessionBehaviorDelete is new in Consul 0.5 and changes the
+ // behavior to delete all associated locks on session invalidation.
+ // It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+ SessionBehaviorDelete = "delete"
+)
+
+// SessionEntry represents a session in Consul
+type SessionEntry struct {
+ CreateIndex uint64
+ ID string
+ Name string
+ Node string
+ Checks []string
+ LockDelay time.Duration
+ Behavior string
+ TTL string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+ c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+ return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ body := make(map[string]interface{})
+ body["Checks"] = []string{}
+ if se != nil {
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ var obj interface{}
+ if se != nil {
+ body := make(map[string]interface{})
+ obj = body
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if len(se.Checks) > 0 {
+ body["Checks"] = se.Checks
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+ var out struct{ ID string }
+ wm, err := s.c.write("/v1/session/create", obj, &out, q)
+ if err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
+ if err != nil {
+ return nil, err
+ }
+ return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+ var entries []*SessionEntry
+ wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], wm, nil
+ }
+ return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a
+// long-running goroutine to ensure a session stays valid.
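+//
+// A minimal sketch (assuming id came from a prior Session.Create with a
+// matching TTL, and that the caller closes doneCh on shutdown):
+//
+//   doneCh := make(chan struct{})
+//   go session.RenewPeriodic("15s", id, nil, doneCh)
+//   // ... do work while the session is kept alive ...
+//   close(doneCh) // stops renewal and destroys the session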
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+ ttl, err := time.ParseDuration(initialTTL)
+ if err != nil {
+ return err
+ }
+
+ waitDur := ttl / 2
+ lastRenewTime := time.Now()
+ var lastErr error
+ for {
+ if time.Since(lastRenewTime) > ttl {
+ return lastErr
+ }
+ select {
+ case <-time.After(waitDur):
+ entry, _, err := s.Renew(id, q)
+ if err != nil {
+ waitDur = time.Second
+ lastErr = err
+ continue
+ }
+ if entry == nil {
+ waitDur = time.Second
+ lastErr = fmt.Errorf("No SessionEntry returned")
+ continue
+ }
+
+ // Handle the server updating the TTL
+ ttl, _ = time.ParseDuration(entry.TTL)
+ waitDur = ttl / 2
+ lastRenewTime = time.Now()
+
+ case <-doneCh:
+ // Attempt a session destroy
+ s.Destroy(id, q)
+ return nil
+ }
+ }
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/list", &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
new file mode 100644
index 000000000..c503c21a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
@@ -0,0 +1,205 @@
+package api
+
+import (
+ "testing"
+)
+
+func TestSession_CreateDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, meta, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ meta, err = session.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+}
+
+func TestSession_CreateRenewDestroy(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ se := &SessionEntry{
+ TTL: "10s",
+ }
+
+ id, meta, err := session.Create(se, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ renew, meta, err := session.Renew(id, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if renew == nil {
+ t.Fatalf("should get session")
+ }
+
+ if renew.ID != id {
+ t.Fatalf("should have matching id")
+ }
+
+ if renew.TTL != "10s" {
+ t.Fatalf("should get session with TTL")
+ }
+}
+
+func TestSession_Info(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+
+ if info == nil {
+ t.Fatalf("should get session")
+ }
+ if info.CreateIndex == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.ID != id {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Name != "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Node == "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if len(info.Checks) == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.LockDelay == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Behavior != "release" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.TTL != "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
+
+func TestSession_Node(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sessions, qm, err := session.Node(info.Node, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
+
+func TestSession_List(t *testing.T) {
+ t.Parallel()
+ c, s := makeClient(t)
+ defer s.Stop()
+
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ sessions, qm, err := session.List(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 000000000..74ef61a67
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+ c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+ return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+ r := s.c.newRequest("GET", "/v1/status/leader")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var leader string
+ if err := decodeBody(resp, &leader); err != nil {
+ return "", err
+ }
+ return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+ r := s.c.newRequest("GET", "/v1/status/peers")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var peers []string
+ if err := decodeBody(resp, &peers); err != nil {
+ return nil, err
+ }
+ return peers, nil
+}
diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
new file mode 100644
index 000000000..62dc1550f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
@@ -0,0 +1,37
@@ +package api + +import ( + "testing" +) + +func TestStatusLeader(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + status := c.Status() + + leader, err := status.Leader() + if err != nil { + t.Fatalf("err: %v", err) + } + if leader == "" { + t.Fatalf("Expected leader") + } +} + +func TestStatusPeers(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + status := c.Status() + + peers, err := status.Peers() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(peers) == 0 { + t.Fatalf("Expected peers ") + } +} From b0c181dc0db9c6ba041224d43cc90e7c71bd1927 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Thu, 14 May 2015 13:32:11 +0200 Subject: [PATCH 26/27] Add Consul SD configuration. --- config/config.go | 42 ++++++++++++++++++++++++++++++++++++++ retrieval/targetmanager.go | 3 +++ 2 files changed, 45 insertions(+) diff --git a/config/config.go b/config/config.go index 5da03e2de..bb8e75221 100644 --- a/config/config.go +++ b/config/config.go @@ -78,6 +78,12 @@ var ( DefaultFileSDConfig = DefaultedFileSDConfig{ RefreshInterval: Duration(30 * time.Second), } + + // The default Consul SD configuration. + DefaultConsulSDConfig = DefaultedConsulSDConfig{ + TagSeparator: ",", + Scheme: "http", + } ) // Config is the top-level configuration for Prometheus's config files. @@ -200,6 +206,8 @@ type DefaultedScrapeConfig struct { DNSSDConfigs []*DNSSDConfig `yaml:"dns_sd_configs,omitempty"` // List of file service discovery configurations. FileSDConfigs []*FileSDConfig `yaml:"file_sd_configs,omitempty"` + // List of Consul service discovery configurations. + ConsulSDConfigs []*ConsulSDConfig `yaml:"consul_sd_configs,omitempty"` // List of relabel configurations. RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` } @@ -340,6 +348,40 @@ type DefaultedFileSDConfig struct { RefreshInterval Duration `yaml:"refresh_interval,omitempty"` } +// ConsulSDConfig is the configuration for Consul service discovery. +type ConsulSDConfig struct { + // DefaultedConsulSDConfig contains the actual fields for ConsulSDConfig. + DefaultedConsulSDConfig `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaller interface. +func (c *ConsulSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + c.DefaultedConsulSDConfig = DefaultConsulSDConfig + err := unmarshal(&c.DefaultedConsulSDConfig) + if err != nil { + return err + } + if strings.TrimSpace(c.Server) == "" { + return fmt.Errorf("Consul SD configuration requires a server address") + } + if len(c.Services) == 0 { + return fmt.Errorf("Consul SD configuration requires at least one service name") + } + return nil +} + +// DefaultedConsulSDConfig is a proxy type for ConsulSDConfig. +type DefaultedConsulSDConfig struct { + Server string `yaml:"server"` + Token string `yaml:"token"` + Datacenter string `yaml:"datacenter"` + TagSeparator string `yaml:"tag_separator"` + Scheme string `yaml:"scheme"` + Username string `yaml:"username"` + Password string `yaml:"password"` + Services []string `yaml:"services"` +} + // RelabelAction is the action to be performed on relabeling. 
type RelabelAction string
diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go
index 7763a5604..5ddc2892f 100644
--- a/retrieval/targetmanager.go
+++ b/retrieval/targetmanager.go
@@ -361,6 +361,9 @@ func ProvidersFromConfig(cfg *config.ScrapeConfig) []TargetProvider {
 for _, c := range cfg.FileSDConfigs {
 providers = append(providers, discovery.NewFileDiscovery(c))
 }
+ for _, c := range cfg.ConsulSDConfigs {
+ providers = append(providers, discovery.NewConsulDiscovery(c))
+ }
 if len(cfg.TargetGroups) > 0 {
 providers = append(providers, NewStaticProvider(cfg.TargetGroups))
 }
From 7143dff02fd1493e5b7859b665d9fafd452edf1c Mon Sep 17 00:00:00 2001
From: Fabian Reinartz
Date: Thu, 14 May 2015 13:32:27 +0200
Subject: [PATCH 27/27] Add initial implementation for SD via Consul.

This commit adds service discovery using Consul's HTTP API
and watches (long polling) to retrieve target updates.
---
 retrieval/discovery/consul.go | 266 ++++++++++++++++++++++++++++++++++
 1 file changed, 266 insertions(+)
 create mode 100644 retrieval/discovery/consul.go

diff --git a/retrieval/discovery/consul.go b/retrieval/discovery/consul.go
new file mode 100644
index 000000000..94f7dbc56
--- /dev/null
+++ b/retrieval/discovery/consul.go
@@ -0,0 +1,266 @@
+package discovery
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/golang/glog"
+
+ consul "github.com/hashicorp/consul/api"
+ clientmodel "github.com/prometheus/client_golang/model"
+
+ "github.com/prometheus/prometheus/config"
+)
+
+const (
+ consulSourcePrefix = "consul"
+ consulWatchTimeout = 30 * time.Second
+ consulRetryInterval = 15 * time.Second
+
+ // ConsulNodeLabel is the name for the label containing a target's node name.
+ ConsulNodeLabel = clientmodel.MetaLabelPrefix + "consul_node"
+ // ConsulTagsLabel is the name of the label containing the tags assigned to the target.
+ ConsulTagsLabel = clientmodel.MetaLabelPrefix + "consul_tags"
+ // ConsulServiceLabel is the name of the label containing the service name.
+ ConsulServiceLabel = clientmodel.MetaLabelPrefix + "consul_service"
+ // ConsulDCLabel is the name of the label containing the datacenter ID.
+ ConsulDCLabel = clientmodel.MetaLabelPrefix + "consul_dc"
+)
+
+// ConsulDiscovery retrieves target information from a Consul server
+// and updates it via watches.
+type ConsulDiscovery struct {
+ client *consul.Client
+ clientConf *consul.Config
+ tagSeparator string
+ scrapedServices map[string]struct{}
+
+ mu sync.RWMutex
+ services map[string]*consulService
+ runDone, srvsDone chan struct{}
+}
+
+// consulService contains data belonging to the same service.
+type consulService struct {
+ name string
+ tgroup *config.TargetGroup
+ lastIndex uint64
+ removed bool
+ running bool
+ done chan struct{}
+}
+
+// NewConsulDiscovery returns a new ConsulDiscovery for the given config.
+func NewConsulDiscovery(conf *config.ConsulSDConfig) *ConsulDiscovery {
+ clientConf := &consul.Config{
+ Address: conf.Server,
+ Scheme: conf.Scheme,
+ Datacenter: conf.Datacenter,
+ Token: conf.Token,
+ HttpAuth: &consul.HttpBasicAuth{
+ Username: conf.Username,
+ Password: conf.Password,
+ },
+ }
+ client, err := consul.NewClient(clientConf)
+ if err != nil {
+ // NewClient always returns a nil error.
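+ // If that invariant ever changes, panicking here fails loudly at
+ // startup instead of silently dropping the discovery provider.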
+ panic(fmt.Errorf("discovery.NewConsulDiscovery: %s", err)) + } + cd := &ConsulDiscovery{ + client: client, + clientConf: clientConf, + tagSeparator: conf.TagSeparator, + runDone: make(chan struct{}), + srvsDone: make(chan struct{}, 1), + scrapedServices: map[string]struct{}{}, + services: map[string]*consulService{}, + } + for _, name := range conf.Services { + cd.scrapedServices[name] = struct{}{} + } + return cd +} + +// Sources implements the TargetProvider interface. +func (cd *ConsulDiscovery) Sources() []string { + clientConf := *cd.clientConf + clientConf.HttpClient = &http.Client{Timeout: 5 * time.Second} + + client, err := consul.NewClient(&clientConf) + if err != nil { + // NewClient always returns a nil error. + panic(fmt.Errorf("discovery.ConsulDiscovery.Sources: %s", err)) + } + + srvs, _, err := client.Catalog().Services(nil) + if err != nil { + glog.Errorf("Error refreshing service list: %s", err) + return nil + } + cd.mu.Lock() + defer cd.mu.Unlock() + + srcs := make([]string, 0, len(srvs)) + for name := range srvs { + if _, ok := cd.scrapedServices[name]; ok { + srcs = append(srcs, consulSourcePrefix+":"+name) + } + } + return srcs +} + +// Run implements the TargetProvider interface. +func (cd *ConsulDiscovery) Run(ch chan<- *config.TargetGroup) { + defer close(ch) + + update := make(chan *consulService, 10) + go cd.watchServices(update) + + for { + select { + case <-cd.runDone: + return + case srv := <-update: + if srv.removed { + ch <- &config.TargetGroup{Source: consulSourcePrefix + ":" + srv.name} + break + } + // Launch watcher for the service. + if !srv.running { + go cd.watchService(srv, ch) + srv.running = true + } + } + } +} + +// Stop implements the TargetProvider interface. +func (cd *ConsulDiscovery) Stop() { + glog.V(1).Infof("Stopping Consul service discovery for %s", cd.clientConf.Address) + + // The lock prevents Run from terminating while the watchers attempt + // to send on their channels. + cd.mu.Lock() + defer cd.mu.Unlock() + + // The watching goroutines will terminate after their next watch timeout. + // As this can take long, the channel is buffered and we do not wait. + for _, srv := range cd.services { + srv.done <- struct{}{} + } + cd.srvsDone <- struct{}{} + + // Terminate Run. + cd.runDone <- struct{}{} + + glog.V(1).Infof("Consul service discovery for %s stopped.", cd.clientConf.Address) +} + +// watchServices retrieves updates from Consul's services endpoint and sends +// potential updates to the update channel. +func (cd *ConsulDiscovery) watchServices(update chan<- *consulService) { + var lastIndex uint64 + for { + catalog := cd.client.Catalog() + srvs, meta, err := catalog.Services(&consul.QueryOptions{ + RequireConsistent: false, + WaitIndex: lastIndex, + }) + if err != nil { + glog.Errorf("Error refreshing service list: %s", err) + <-time.After(consulRetryInterval) + continue + } + // If the index equals the previous one, the watch timed out with no update. + if meta.LastIndex == lastIndex { + continue + } + lastIndex = meta.LastIndex + + cd.mu.Lock() + select { + case <-cd.srvsDone: + return + default: + // Continue. + } + // Check for new services. 
+ for name := range srvs { + if _, ok := cd.scrapedServices[name]; !ok { + continue + } + srv, ok := cd.services[name] + if !ok { + srv = &consulService{ + name: name, + tgroup: &config.TargetGroup{}, + done: make(chan struct{}, 1), + } + srv.tgroup.Source = consulSourcePrefix + ":" + name + cd.services[name] = srv + } + srv.tgroup.Labels = clientmodel.LabelSet{ + ConsulServiceLabel: clientmodel.LabelValue(name), + ConsulDCLabel: clientmodel.LabelValue(cd.clientConf.Datacenter), + } + update <- srv + } + // Check for removed services. + for name, srv := range cd.services { + if _, ok := srvs[name]; !ok { + srv.removed = true + update <- srv + srv.done <- struct{}{} + delete(cd.services, name) + } + } + cd.mu.Unlock() + } +} + +// watchService retrieves updates about srv from Consul's service endpoint. +// On a potential update the resulting target group is sent to ch. +func (cd *ConsulDiscovery) watchService(srv *consulService, ch chan<- *config.TargetGroup) { + catalog := cd.client.Catalog() + for { + nodes, meta, err := catalog.Service(srv.name, "", &consul.QueryOptions{ + WaitIndex: srv.lastIndex, + WaitTime: consulWatchTimeout, + }) + if err != nil { + glog.Errorf("Error refreshing service %s: %s", srv.name, err) + <-time.After(consulRetryInterval) + continue + } + // If the index equals the previous one, the watch timed out with no update. + if meta.LastIndex == srv.lastIndex { + continue + } + srv.lastIndex = meta.LastIndex + srv.tgroup.Targets = make([]clientmodel.LabelSet, 0, len(nodes)) + + for _, node := range nodes { + addr := fmt.Sprintf("%s:%d", node.Address, node.ServicePort) + tags := strings.Join(node.ServiceTags, cd.tagSeparator) + + srv.tgroup.Targets = append(srv.tgroup.Targets, clientmodel.LabelSet{ + clientmodel.AddressLabel: clientmodel.LabelValue(addr), + ConsulNodeLabel: clientmodel.LabelValue(node.Node), + ConsulTagsLabel: clientmodel.LabelValue(tags), + }) + } + cd.mu.Lock() + select { + case <-srv.done: + return + default: + // Continue. + } + ch <- srv.tgroup + cd.mu.Unlock() + } +}