Yaml UnmarshalStrict implementation. (#4033)

* Updated yaml vendor package.

* Removed checkOverflow duplicate in rulefmt.

* Removed duplicated HTTPClientConfig.Validate().

* Added yaml static check.
Manos Fokas 2018-04-04 11:07:39 +03:00 committed by Brian Brazil
parent 406233e937
commit 25f929b772
36 changed files with 813 additions and 693 deletions
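For context, a minimal sketch (not part of this commit) of what switching from yaml.Unmarshal to yaml.UnmarshalStrict changes: strict decoding turns unknown and duplicated keys into errors instead of silently dropping them, which is what the inline XXX overflow maps and CheckOverflow helpers removed below used to approximate. The struct here is an illustrative stand-in, not a Prometheus type.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// scrapeConfig is an illustrative stand-in, not a Prometheus type.
type scrapeConfig struct {
	JobName string `yaml:"job_name"`
}

func main() {
	in := []byte("job_name: node\nconsult_sd_configs: []\n")

	var lax scrapeConfig
	// Plain Unmarshal silently drops the misspelled key.
	fmt.Println(yaml.Unmarshal(in, &lax)) // <nil>

	var strict scrapeConfig
	// UnmarshalStrict rejects it with an error along the lines of
	// "field consult_sd_configs not found in type main.scrapeConfig".
	fmt.Println(yaml.UnmarshalStrict(in, &strict))
}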


@ -25,7 +25,6 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
sd_config "github.com/prometheus/prometheus/discovery/config"
yaml_util "github.com/prometheus/prometheus/util/yaml"
"gopkg.in/yaml.v2"
)
@ -42,7 +41,7 @@ func Load(s string) (*Config, error) {
// point as well.
*cfg = DefaultConfig
err := yaml.Unmarshal([]byte(s), cfg)
err := yaml.UnmarshalStrict([]byte(s), cfg)
if err != nil {
return nil, err
}
@ -141,9 +140,6 @@ type Config struct {
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
// original is the input from which the config was parsed.
original string
}
@ -221,9 +217,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal((*plain)(c)); err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "config"); err != nil {
return err
}
// If a global block was open but empty the default global config is overwritten.
// We have to restore it here.
if c.GlobalConfig.isZero() {
@ -273,9 +267,6 @@ type GlobalConfig struct {
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
ExternalLabels model.LabelSet `yaml:"external_labels,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -287,9 +278,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal((*plain)(gc)); err != nil {
return err
}
if err := yaml_util.CheckOverflow(gc.XXX, "global config"); err != nil {
return err
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if gc.ScrapeInterval == 0 {
@ -349,9 +338,6 @@ type ScrapeConfig struct {
RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
// List of metric relabel configurations.
MetricRelabelConfigs []*RelabelConfig `yaml:"metric_relabel_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -362,9 +348,6 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err = yaml_util.CheckOverflow(c.XXX, "scrape_config"); err != nil {
return err
}
if len(c.JobName) == 0 {
return fmt.Errorf("job_name is empty")
}
@ -372,7 +355,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if err = c.HTTPClientConfig.Validate(); err != nil {
if err := c.HTTPClientConfig.Validate(); err != nil {
return err
}
@ -380,7 +363,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if len(c.RelabelConfigs) == 0 {
for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs {
for _, t := range tg.Targets {
if err = CheckTargetAddress(t[model.AddressLabel]); err != nil {
if err := CheckTargetAddress(t[model.AddressLabel]); err != nil {
return err
}
}
@ -393,9 +376,6 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type AlertingConfig struct {
AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
AlertmanagerConfigs []*AlertmanagerConfig `yaml:"alertmanagers,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -404,10 +384,7 @@ func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
// by the default due to the YAML parser behavior for empty blocks.
*c = AlertingConfig{}
type plain AlertingConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
return yaml_util.CheckOverflow(c.XXX, "alerting config")
return unmarshal((*plain)(c))
}
// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
@ -427,9 +404,6 @@ type AlertmanagerConfig struct {
// List of Alertmanager relabel configurations.
RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -439,9 +413,6 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
if err := unmarshal((*plain)(c)); err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "alertmanager config"); err != nil {
return err
}
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
@ -476,18 +447,12 @@ func CheckTargetAddress(address model.LabelValue) error {
type ClientCert struct {
Cert string `yaml:"cert"`
Key config_util.Secret `yaml:"key"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// FileSDConfig is the configuration for file based discovery.
type FileSDConfig struct {
Files []string `yaml:"files"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// RelabelAction is the action to be performed on relabeling.
@ -542,9 +507,6 @@ type RelabelConfig struct {
Replacement string `yaml:"replacement,omitempty"`
// Action is the action to be performed for the relabeling.
Action RelabelAction `yaml:"action,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -554,9 +516,6 @@ func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal((*plain)(c)); err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "relabel_config"); err != nil {
return err
}
if c.Regex.Regexp == nil {
c.Regex = MustNewRegexp("")
}
@ -646,9 +605,6 @@ type RemoteWriteConfig struct {
// values arbitrarily into the overflow maps of further-down types.
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -665,11 +621,7 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if err := c.HTTPClientConfig.Validate(); err != nil {
return err
}
return yaml_util.CheckOverflow(c.XXX, "remote_write")
return c.HTTPClientConfig.Validate()
}
// QueueConfig is the configuration for the queue used to write to remote
@ -707,9 +659,6 @@ type RemoteReadConfig struct {
// RequiredMatchers is an optional list of equality matchers which have to
// be present in a selector to query the remote read endpoint.
RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -722,13 +671,8 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro
if c.URL == nil {
return fmt.Errorf("url for remote_read is empty")
}
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
if err := c.HTTPClientConfig.Validate(); err != nil {
return err
}
return yaml_util.CheckOverflow(c.XXX, "remote_read")
return c.HTTPClientConfig.Validate()
}
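A detail worth noting before the test expectations below: the strict error messages name config.plain rather than config.Config because each UnmarshalYAML method decodes into a local "type plain ..." alias to avoid recursing into itself, and the yaml package reports the type it was actually decoding into. A minimal sketch of that mechanism with illustrative types (not the real Prometheus ones):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type config struct {
	JobName string `yaml:"job_name"`
}

func (c *config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// The alias has config's fields but none of its methods, so decoding
	// into it does not call UnmarshalYAML again.
	type plain config
	return unmarshal((*plain)(c))
}

func main() {
	var c config
	err := yaml.UnmarshalStrict([]byte("job_name: node\nbogus: 1\n"), &c)
	// Prints an error naming the alias, e.g.
	// "field bogus not found in type main.plain"; in Prometheus the
	// corresponding message reads "... in type config.plain".
	fmt.Println(err)
}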


@ -657,7 +657,7 @@ var expectedErrors = []struct {
errMsg: "invalid rule file path",
}, {
filename: "unknown_attr.bad.yml",
errMsg: "unknown fields in scrape_config: consult_sd_configs",
errMsg: "field consult_sd_configs not found in type config.plain",
}, {
filename: "bearertoken.bad.yml",
errMsg: "at most one of bearer_token & bearer_token_file must be configured",
@ -672,7 +672,7 @@ var expectedErrors = []struct {
errMsg: "role",
}, {
filename: "kubernetes_namespace_discovery.bad.yml",
errMsg: "unknown fields in namespaces",
errMsg: "field foo not found in type kubernetes.plain",
}, {
filename: "kubernetes_bearertoken_basicauth.bad.yml",
errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
@ -690,7 +690,7 @@ var expectedErrors = []struct {
errMsg: "relabel configuration for hashmod action requires 'target_label' value",
}, {
filename: "unknown_global_attr.bad.yml",
errMsg: "unknown fields in global config: nonexistent_field",
errMsg: "field nonexistent_field not found in type config.plain",
}, {
filename: "remote_read_url_missing.bad.yml",
errMsg: `url for remote_read is empty`,
@ -702,6 +702,10 @@ var expectedErrors = []struct {
filename: "ec2_filters_empty_values.bad.yml",
errMsg: `EC2 SD configuration filter values cannot be empty`,
},
{
filename: "section_key_dup.bad.yml",
errMsg: "field scrape_configs already set in type config.plain",
},
}
func TestBadConfigs(t *testing.T) {
@ -713,7 +717,7 @@ func TestBadConfigs(t *testing.T) {
}
}
func TestBadStaticConfigs(t *testing.T) {
func TestBadStaticConfigsJSON(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.json")
testutil.Ok(t, err)
var tg targetgroup.Group
@ -721,6 +725,14 @@ func TestBadStaticConfigs(t *testing.T) {
testutil.NotOk(t, err, "")
}
func TestBadStaticConfigsYML(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.yml")
testutil.Ok(t, err)
var tg targetgroup.Group
err = yaml.UnmarshalStrict(content, &tg)
testutil.NotOk(t, err, "")
}
func TestEmptyConfig(t *testing.T) {
c, err := Load("")
testutil.Ok(t, err)


@ -0,0 +1,5 @@
scrape_configs:
- job_name: 'prometheus_system'
scrape_configs:
- job_name: 'foo_system'
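A hedged illustration (not part of the commit) of how the new section_key_dup.bad.yml fixture above trips the strict decoder through config.Load, matching the expectation added to the test table; the import path and error text are assumed from the surrounding diff.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Run from the repository root; the fixture is the one added above.
	b, err := ioutil.ReadFile("config/testdata/section_key_dup.bad.yml")
	if err != nil {
		panic(err)
	}
	// config.Load now uses yaml.UnmarshalStrict, so the duplicated
	// scrape_configs section is rejected with an error along the lines of
	// "field scrape_configs already set in type config.plain".
	if _, err := config.Load(string(b)); err != nil {
		fmt.Println(err)
	}
}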

config/testdata/static_config.bad.yml vendored Normal file (4 changed lines)

@ -0,0 +1,4 @@
targets: ['1.2.3.4:9001', '1.2.3.5:9090']
labels:
valid_label: foo
not:valid_label: bar


@ -32,7 +32,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -73,9 +72,6 @@ type SDConfig struct {
ClientID string `yaml:"client_id,omitempty"`
ClientSecret config_util.Secret `yaml:"client_secret,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -89,8 +85,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.SubscriptionID == "" {
return fmt.Errorf("Azure SD configuration requires a subscription_id")
}
return yaml_util.CheckOverflow(c.XXX, "azure_sd_config")
return nil
}
func init() {


@ -26,8 +26,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/discovery/triton"
"github.com/prometheus/prometheus/discovery/zookeeper"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
// ServiceDiscoveryConfig configures lists of different service discovery mechanisms.
@ -58,16 +56,10 @@ type ServiceDiscoveryConfig struct {
AzureSDConfigs []*azure.SDConfig `yaml:"azure_sd_configs,omitempty"`
// List of Triton service discovery configurations.
TritonSDConfigs []*triton.SDConfig `yaml:"triton_sd_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ServiceDiscoveryConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain ServiceDiscoveryConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
return yaml_util.CheckOverflow(c.XXX, "service discovery config")
return unmarshal((*plain)(c))
}


@ -32,7 +32,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/strutil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -118,8 +117,6 @@ type SDConfig struct {
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -130,9 +127,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "consul_sd_config"); err != nil {
return err
}
if strings.TrimSpace(c.Server) == "" {
return fmt.Errorf("Consul SD configuration requires a server address")
}


@ -27,7 +27,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -66,8 +65,6 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Type string `yaml:"type"`
Port int `yaml:"port"` // Ignored for SRV records
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -78,9 +75,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "dns_sd_config"); err != nil {
return err
}
if len(c.Names) == 0 {
return fmt.Errorf("DNS-SD config must contain at least one SRV record name")
}


@ -34,7 +34,6 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -86,9 +85,6 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
Filters []*Filter `yaml:"filters"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -99,9 +95,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "ec2_sd_config"); err != nil {
return err
}
if c.Region == "" {
sess, err := session.NewSession()
if err != nil {


@ -31,7 +31,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
yaml_util "github.com/prometheus/prometheus/util/yaml"
"gopkg.in/fsnotify/fsnotify.v1"
"gopkg.in/yaml.v2"
)
@ -49,9 +48,6 @@ var (
type SDConfig struct {
Files []string `yaml:"files"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -62,9 +58,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "file_sd_config"); err != nil {
return err
}
if len(c.Files) == 0 {
return fmt.Errorf("file service discovery config must contain at least one path name")
}
@ -385,7 +378,7 @@ func (d *Discovery) readFile(filename string) ([]*targetgroup.Group, error) {
return nil, err
}
case ".yml", ".yaml":
if err := yaml.Unmarshal(content, &targetGroups); err != nil {
if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil {
return nil, err
}
default:


@ -30,7 +30,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -84,9 +83,6 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
TagSeparator string `yaml:"tag_separator,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -97,9 +93,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "gce_sd_config"); err != nil {
return err
}
if c.Project == "" {
return fmt.Errorf("GCE SD configuration requires a project")
}


@ -26,7 +26,6 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
yaml_util "github.com/prometheus/prometheus/util/yaml"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
@ -89,9 +88,6 @@ type SDConfig struct {
BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
NamespaceDiscovery NamespaceDiscovery `yaml:"namespaces"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -102,9 +98,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "kubernetes_sd_config"); err != nil {
return err
}
if c.Role == "" {
return fmt.Errorf("role missing (one of: pod, service, endpoints, node)")
}
@ -126,19 +119,13 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Kubernetes namespaces.
type NamespaceDiscovery struct {
Names []string `yaml:"names"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = NamespaceDiscovery{}
type plain NamespaceDiscovery
err := unmarshal((*plain)(c))
if err != nil {
return err
}
return yaml_util.CheckOverflow(c.XXX, "namespaces")
return unmarshal((*plain)(c))
}
func init() {


@ -736,7 +736,7 @@ scrape_configs:
- targets: ["foo:9090"]
- targets: ["bar:9090"]
`
if err := yaml.Unmarshal([]byte(sOne), cfg); err != nil {
if err := yaml.UnmarshalStrict([]byte(sOne), cfg); err != nil {
t.Fatalf("Unable to load YAML config sOne: %s", err)
}
ctx, cancel := context.WithCancel(context.Background())
@ -760,7 +760,7 @@ scrape_configs:
static_configs:
- targets: ["foo:9090"]
`
if err := yaml.Unmarshal([]byte(sTwo), cfg); err != nil {
if err := yaml.UnmarshalStrict([]byte(sTwo), cfg); err != nil {
t.Fatalf("Unable to load YAML config sOne: %s", err)
}
c = make(map[string]sd_config.ServiceDiscoveryConfig)


@ -34,7 +34,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/strutil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -90,9 +89,6 @@ type SDConfig struct {
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
BearerToken config_util.Secret `yaml:"bearer_token,omitempty"`
BearerTokenFile string `yaml:"bearer_token_file,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -103,9 +99,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "marathon_sd_config"); err != nil {
return err
}
if len(c.Servers) == 0 {
return fmt.Errorf("Marathon SD config must contain at least one Marathon server")
}


@ -26,7 +26,6 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery/targetgroup"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
var (
@ -61,9 +60,6 @@ type SDConfig struct {
Region string `yaml:"region"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// OpenStackRole is role of the target in OpenStack.
@ -106,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.Region == "" {
return fmt.Errorf("Openstack SD configuration requires a region")
}
return yaml_util.CheckOverflow(c.XXX, "openstack_sd_config")
return nil
}
func init() {


@ -18,7 +18,6 @@ import (
"encoding/json"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/util/yaml"
)
// Group is a set of targets with a common label set (production, test, staging, etc.).
@ -40,9 +39,8 @@ func (tg Group) String() string {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
g := struct {
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels"`
XXX map[string]interface{} `yaml:",inline"`
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels"`
}{}
if err := unmarshal(&g); err != nil {
return err
@ -54,7 +52,7 @@ func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
})
}
tg.Labels = g.Labels
return yaml.CheckOverflow(g.XXX, "static_config")
return nil
}
// MarshalYAML implements the yaml.Marshaler interface.


@ -30,7 +30,6 @@ import (
config_util "github.com/prometheus/common/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/httputil"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
const (
@ -71,8 +70,6 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
TLSConfig config_util.TLSConfig `yaml:"tls_config,omitempty"`
Version int `yaml:"version"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -95,7 +92,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if c.RefreshInterval <= 0 {
return fmt.Errorf("Triton SD configuration requires RefreshInterval to be a positive integer")
}
return yaml_util.CheckOverflow(c.XXX, "triton_sd_config")
return nil
}
func init() {


@ -29,7 +29,6 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/prometheus/util/treecache"
yaml_util "github.com/prometheus/prometheus/util/yaml"
)
var (
@ -48,9 +47,6 @@ type ServersetSDConfig struct {
Servers []string `yaml:"servers"`
Paths []string `yaml:"paths"`
Timeout model.Duration `yaml:"timeout,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -61,9 +57,6 @@ func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "serverset_sd_config"); err != nil {
return err
}
if len(c.Servers) == 0 {
return fmt.Errorf("serverset SD config must contain at least one Zookeeper server")
}
@ -83,9 +76,6 @@ type NerveSDConfig struct {
Servers []string `yaml:"servers"`
Paths []string `yaml:"paths"`
Timeout model.Duration `yaml:"timeout,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -96,9 +86,6 @@ func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err := yaml_util.CheckOverflow(c.XXX, "nerve_sd_config"); err != nil {
return err
}
if len(c.Servers) == 0 {
return fmt.Errorf("nerve SD config must contain at least one Zookeeper server")
}


@ -442,7 +442,7 @@ alerting:
alertmanagers:
- static_configs:
`
if err := yaml.Unmarshal([]byte(s), cfg); err != nil {
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}
@ -497,7 +497,7 @@ alerting:
regex: 'alertmanager:9093'
action: drop
`
if err := yaml.Unmarshal([]byte(s), cfg); err != nil {
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
t.Fatalf("Unable to load YAML config: %s", err)
}


@ -14,9 +14,7 @@
package rulefmt
import (
"fmt"
"io/ioutil"
"strings"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
@ -39,9 +37,6 @@ func (err *Error) Error() string {
// RuleGroups is a set of rule groups that are typically exposed in a file.
type RuleGroups struct {
Groups []RuleGroup `yaml:"groups"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// Validate validates all rules in the rule groups.
@ -60,10 +55,6 @@ func (g *RuleGroups) Validate() (errs []error) {
)
}
if err := checkOverflow(g.XXX, "rule_group"); err != nil {
errs = append(errs, errors.Wrapf(err, "Group: %s", g.Name))
}
set[g.Name] = struct{}{}
for i, r := range g.Rules {
@ -84,10 +75,6 @@ func (g *RuleGroups) Validate() (errs []error) {
}
}
if err := checkOverflow(g.XXX, "config_file"); err != nil {
errs = append(errs, err)
}
return errs
}
@ -96,9 +83,6 @@ type RuleGroup struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Rules []Rule `yaml:"rules"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// Rule describes an alerting or recording rule.
@ -109,9 +93,6 @@ type Rule struct {
For model.Duration `yaml:"for,omitempty"`
Labels map[string]string `yaml:"labels,omitempty"`
Annotations map[string]string `yaml:"annotations,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// Validate the rule and return a list of encountered errors.
@ -156,28 +137,13 @@ func (r *Rule) Validate() (errs []error) {
}
}
if err := checkOverflow(r.XXX, "rule"); err != nil {
errs = append(errs, err)
}
return errs
}
func checkOverflow(m map[string]interface{}, ctx string) error {
if len(m) > 0 {
var keys []string
for k := range m {
keys = append(keys, k)
}
return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
}
return nil
}
// Parse parses and validates a set of rules.
func Parse(content []byte) (*RuleGroups, []error) {
var groups RuleGroups
if err := yaml.Unmarshal(content, &groups); err != nil {
if err := yaml.UnmarshalStrict(content, &groups); err != nil {
return nil, []error{err}
}
return &groups, groups.Validate()
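As a quick illustration (a sketch, assuming the pkg/rulefmt import path and the record/expr field names of Rule), an unknown key in a rule file now surfaces directly from Parse via UnmarshalStrict instead of through the removed checkOverflow helper:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/rulefmt"
)

func main() {
	rules := []byte(`
groups:
  - name: example
    rules:
      - record: job:up:sum
        expr: sum by (job) (up)
        fro: 5m            # misspelled key, rejected by UnmarshalStrict
`)
	_, errs := rulefmt.Parse(rules)
	for _, err := range errs {
		fmt.Println(err)
	}
}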


@ -1,31 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"fmt"
"strings"
)
// CheckOverflow checks for unknown config params and raises an error if found
func CheckOverflow(m map[string]interface{}, ctx string) error {
if len(m) > 0 {
var keys []string
for k := range m {
keys = append(keys, k)
}
return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
}
return nil
}
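For reference, a hedged sketch of the pattern this removed helper supported (illustrative struct and context names; the real call sites appear throughout the diffs above). It only compiles against the pre-commit tree, since both the overflow map and the helper are gone after this change.

package config

import (
	yaml_util "github.com/prometheus/prometheus/util/yaml" // deleted by this commit
)

// sdConfig is an illustrative stand-in for the SD config structs above.
type sdConfig struct {
	Server string `yaml:"server"`
	// Catches all undefined fields and must be empty after parsing.
	XXX map[string]interface{} `yaml:",inline"`
}

func (c *sdConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain sdConfig
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	// Unknown keys land in XXX; CheckOverflow turned a non-empty map into
	// an error such as "unknown fields in sd_config: foo". With
	// yaml.UnmarshalStrict both the map and this call become unnecessary.
	return yaml_util.CheckOverflow(c.XXX, "sd_config")
}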

vendor/gopkg.in/yaml.v2/LICENSE generated vendored (325 changed lines)

@ -1,188 +1,201 @@
[LICENSE rewritten: the previous text (Copyright (c) 2011-2014 Canonical Inc., LGPLv3 with a special exception allowing the library to be linked statically or dynamically without providing Minimal Corresponding Source or Minimal Application Code) is replaced wholesale by the standard Apache License, Version 2.0, January 2004 text.]

vendor/gopkg.in/yaml.v2/README.md generated vendored (9 changed lines)

@ -42,7 +42,7 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https:
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
@ -65,9 +65,14 @@ b:
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct{C int; D []int ",flow"}
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {

vendor/gopkg.in/yaml.v2/apic.go generated vendored (53 changed lines)

@ -2,7 +2,6 @@ package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.

vendor/gopkg.in/yaml.v2/decode.go generated vendored (250 changed lines)

@ -4,6 +4,7 @@ import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
@ -22,19 +23,22 @@ type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
// For an alias node, alias holds the resolved alias.
alias *node
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
parser yaml_parser_t
event yaml_event_t
doc *node
doneInit bool
}
func newParser(b []byte) *parser {
@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
yaml_parser_set_input_reader(&p.parser, r)
return &p
}
func (p *parser) init() {
if p.doneInit {
return
}
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
@ -64,16 +77,35 @@ func (p *parser) destroy() {
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
yaml_event_delete(&p.event)
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}
// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}
func (p *parser) fail() {
@ -81,6 +113,10 @@ func (p *parser) fail() {
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't iterate line before returning error
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
}
func (p *parser) parse() *node {
switch p.event.typ {
p.init()
switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
@ -118,9 +155,8 @@ func (p *parser) parse() *node {
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
panic("attempted to parse unknown event: " + p.event.typ.String())
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
@ -135,19 +171,20 @@ func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
p.expect(yaml_DOCUMENT_START_EVENT)
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
n.alias = p.doc.anchors[n.value]
if n.alias == nil {
failf("unknown anchor '%s' referenced", n.value)
}
p.expect(yaml_ALIAS_EVENT)
return n
}
@ -157,29 +194,29 @@ func (p *parser) scalar() *node {
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
p.expect(yaml_MAPPING_END_EVENT)
return n
}
@ -188,9 +225,10 @@ func (p *parser) mapping() *node {
type decoder struct {
doc *node
aliases map[string]bool
aliases map[*node]bool
mapType reflect.Type
terrors []string
strict bool
}
var (
@ -198,11 +236,13 @@ var (
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
d.aliases = make(map[string]bool)
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[*node]bool)
return d
}
@ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
d.aliases[n] = true
good = d.unmarshal(n.alias, out)
delete(d.aliases, n)
return good
}
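The alias handling above now relies on anchors being resolved at parse time (the parser stores n.alias and rejects unknown anchors up front), and cycle detection keys on the node itself rather than the anchor name. A minimal usage sketch, not part of this diff, assuming the package is imported from gopkg.in/yaml.v2:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "copy" reuses the mapping anchored at &b; an unknown anchor such as
	// *missing would now fail while parsing rather than while decoding.
	src := []byte("base: &b {a: 1}\ncopy: *b\n")
	var out map[string]map[string]int
	if err := yaml.Unmarshal(src, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["copy"]["a"]) // 1
}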
@ -329,7 +366,7 @@ func resetMap(out reflect.Value) {
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
func (d *decoder) scalar(n *node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == yaml_BINARY_TAG {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should bowl out any dubious values.
text = []byte(n.value)
}
err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
return true
}
if resolved != nil {
out.SetString(n.value)
good = true
return true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else if tag == yaml_TIMESTAMP_TAG {
// It looks like a timestamp but for backward compatibility
// reasons we set it as a string, so that code that unmarshals
// timestamp-like values into interface{} will continue to
// see a string and not a time.Time.
// TODO(v3) Drop this.
out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
return true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
return true
}
}
}
@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
return true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
return true
case int64:
out.SetFloat(float64(resolved))
good = true
return true
case uint64:
out.SetFloat(float64(resolved))
good = true
return true
case float64:
out.SetFloat(resolved)
good = true
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
return true
}
}
if !good {
d.terror(n, tag, out)
}
return good
d.terror(n, tag, out)
return false
}
func settableValueOf(i interface{}) reflect.Value {
@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
j++
}
}
out.Set(out.Slice(0, j))
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
if iface.IsValid() {
iface.Set(out)
}
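The sequence decoder above now accepts fixed-size Go arrays in addition to slices, and rejects sequences whose length does not match the array. A small sketch of the resulting behaviour, not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var rgb [3]uint8
	if err := yaml.Unmarshal([]byte("[255, 128, 0]"), &rgb); err != nil {
		panic(err)
	}
	fmt.Println(rgb) // [255 128 0]

	// A mismatched length is reported instead of being silently truncated,
	// e.g. "yaml: invalid array: want 3 elements but got 2".
	fmt.Println(yaml.Unmarshal([]byte("[1, 2]"), &rgb))
}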
@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
d.setMapIndex(n.children[i+1], out, k, e)
}
}
}
@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
return true
}
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
if d.strict && out.MapIndex(k) != zeroValue {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
return
}
out.SetMapIndex(k, v)
}
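setMapIndex is where strict mode catches duplicate mapping keys; in non-strict mode the last value simply wins. A sketch of how that surfaces through the public API, not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	src := []byte("a: 1\na: 2\n")

	var loose map[string]int
	_ = yaml.Unmarshal(src, &loose)
	fmt.Println(loose["a"]) // 2: without strict mode the last key silently wins

	var strict map[string]int
	// Reports something like: line 2: key "a" already set in map.
	fmt.Println(yaml.UnmarshalStrict(src, &strict))
}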
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
elemType = inlineMap.Type().Elem()
}
var doneFields []bool
if d.strict {
doneFields = make([]bool, len(sinfo.FieldsList))
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.strict {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
@ -639,7 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
d.setMapIndex(n.children[i+1], inlineMap, name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
}
}
return true
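This is the code path that makes strict decoding reject mapping keys with no matching struct field (and duplicate struct fields), which is what lets a strict config parser catch typos in field names. A hypothetical sketch, not part of this diff; the Target type exists only for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Target is a made-up type for this example.
type Target struct {
	JobName string `yaml:"job_name"`
}

func main() {
	src := []byte("job_name: node\njobname: typo\n")

	var t Target
	if err := yaml.Unmarshal(src, &t); err != nil {
		panic(err) // non-strict decoding drops the unknown "jobname" key silently
	}

	// Reports something like: line 2: field jobname not found in type main.Target.
	fmt.Println(yaml.UnmarshalStrict(src, &t))
}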

vendor/gopkg.in/yaml.v2/emitterc.go (generated, vendored; 18 changed lines)

@ -2,6 +2,7 @@ package yaml
import (
"bytes"
"fmt"
)
// Flush the buffer if needed.
@ -664,9 +665,8 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_emit_mapping_start(emitter, event)
default:
return yaml_emitter_set_emitter_error(emitter,
"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
}
return false
}
// Expect ALIAS.
@ -995,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
break_space = false
space_break = false
preceeded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
preceded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
)
emitter.scalar_data.value = value
@ -1017,7 +1017,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
flow_indicators = true
}
preceeded_by_whitespace = true
preceded_by_whitespace = true
for i, w := 0, 0; i < len(value); i += w {
w = width(value[i])
followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@ -1048,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
block_indicators = true
}
case '#':
if preceeded_by_whitespace {
if preceded_by_whitespace {
flow_indicators = true
block_indicators = true
}
@ -1089,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
}
// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
preceeded_by_whitespace = is_blankz(value, i)
preceded_by_whitespace = is_blankz(value, i)
}
emitter.scalar_data.multiline = line_breaks

vendor/gopkg.in/yaml.v2/encode.go (generated, vendored; 130 changed lines)

@ -3,12 +3,14 @@ package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
@ -16,25 +18,39 @@ type encoder struct {
event yaml_event_t
out []byte
flow bool
// doneInit holds whether the initial stream_start_event has been
// emitted.
doneInit bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
@ -44,9 +60,7 @@ func (e *encoder) destroy() {
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) {
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
switch m := iface.(type) {
case time.Time, *time.Time:
// Although time.Time implements TextMarshaler,
// we don't want to treat it as a string for YAML
// purposes because YAML has special support for
// timestamps.
case Marshaler:
v, err := m.MarshalYAML()
if err != nil {
fail(err)
@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
case encoding.TextMarshaler:
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
if in.Type() == ptrTimeType {
e.timev(tag, in.Elem())
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type() == timeType {
e.timev(tag, in)
} else {
e.structv(tag, in)
}
case reflect.Slice, reflect.Array:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = yaml_BINARY_TAG
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
} else {
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
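The reworked stringv decides quoting by checking what an unquoted string would resolve to, and transparently base64-encodes strings that are not valid UTF-8. A rough sketch of the observable effect, not part of this diff; the exact rendering of each value is an assumption:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"plain":  "hello",    // resolves to a plain string, emitted unquoted
		"number": "0x2A",     // would resolve to an int if left plain, so it gets quoted
		"binary": "\xff\xfe", // invalid UTF-8, emitted as !!binary with a base64 payload
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}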
@ -281,9 +326,14 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
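With the new timev helper, time.Time values (and *time.Time via the Ptr branch of marshal above) are emitted as RFC 3339 timestamps instead of going through TextMarshaler. A minimal sketch, not part of this diff:

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

func main() {
	type Event struct {
		At time.Time `yaml:"at"`
	}
	out, err := yaml.Marshal(Event{At: time.Date(2018, 1, 2, 15, 4, 5, 0, time.UTC)})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // at: 2018-01-02T15:04:05Z
}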
func (e *encoder) floatv(tag string, in reflect.Value) {
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
s := strconv.FormatFloat(in.Float(), 'g', -1, 64)
switch s {
case "+Inf":
s = ".inf"

vendor/gopkg.in/yaml.v2/parserc.go (generated, vendored; 1 changed line)

@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
default:
panic("invalid parser state")
}
return false
}
// Parse the production:

vendor/gopkg.in/yaml.v2/readerc.go (generated, vendored; 27 changed lines)

@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// for that to be the case, and there are tests
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is Go) panicking; or C) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
@ -247,7 +256,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
@ -357,23 +366,26 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
buffer_len += width
parser.unread++
}
@ -386,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}

vendor/gopkg.in/yaml.v2/resolve.go (generated, vendored; 91 changed lines)

@ -3,9 +3,10 @@ package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"unicode/utf8"
"time"
)
type resolveMapItem struct {
@ -74,12 +75,14 @@ func longTag(tag string) string {
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
@ -89,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
case yaml_FLOAT_TAG:
if rtag == yaml_INT_TAG {
switch v := out.(type) {
case int64:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
case int:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
@ -122,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == yaml_TIMESTAMP_TAG {
t, ok := parseTimestamp(in)
if ok {
return yaml_TIMESTAMP_TAG, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
@ -135,9 +160,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
@ -153,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
if true || intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, -intv
return yaml_INT_TAG, intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
return yaml_STR_TAG, in
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
@ -201,3 +220,39 @@ func encodeBase64(s string) string {
}
return string(out[:k])
}
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
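Together with the yaml_TIMESTAMP_TAG handling in the decoder, parseTimestamp lets unquoted timestamp-looking scalars decode directly into time.Time fields, while interface{} targets still receive the original string for backward compatibility. A small sketch, not part of this diff:

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		Born time.Time `yaml:"born"`
	}
	if err := yaml.Unmarshal([]byte("born: 2001-12-14 21:59:43.10"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Born.UTC()) // 2001-12-14 21:59:43.1 +0000 UTC

	// Decoding the same scalar into interface{} keeps it as a string.
	var iface interface{}
	_ = yaml.Unmarshal([]byte("2001-12-14 21:59:43.10"), &iface)
	fmt.Printf("%T\n", iface) // string
}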

vendor/gopkg.in/yaml.v2/scannerc.go (generated, vendored; 72 changed lines)

@ -9,7 +9,7 @@ import (
// ************
//
// The following notes assume that you are familiar with the YAML specification
// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
// some cases we are less restrictive that it requires.
//
// The process of transforming a YAML stream into a sequence of events is
@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
if directive {
context = "while parsing a %TAG directive"
}
return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
}
func trace(args ...interface{}) func() {
@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
required := parser.flow_level == 0 && parser.indent == parser.mark.column
// A simple key is required only when it is the first token in the current
// line. Therefore it is always allowed. But we add a check anyway.
if required && !parser.simple_key_allowed {
panic("should not happen")
}
//
// If the current position may start a simple key, save it.
//
@ -961,7 +955,7 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
}
// Pop indentation levels from the indents stack until the current level
// becomes less or equal to the column. For each intendation level, append
// becomes less or equal to the column. For each indentation level, append
// the BLOCK-END token.
func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
// In the flow context, do nothing.
@ -969,7 +963,7 @@ func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
return true
}
// Loop through the intendation levels in the stack.
// Loop through the indentation levels in the stack.
for parser.indent > column {
// Create a token and append it to the queue.
token := yaml_token_t{
@ -1546,7 +1540,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool
// Unknown directive.
} else {
yaml_parser_set_scanner_error(parser, "while scanning a directive",
start_mark, "found uknown directive name")
start_mark, "found unknown directive name")
return false
}
@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
if directive && !(s[0] == '!' && s[1] == 0) {
if directive && string(s) != "!" {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@ -1959,6 +1953,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
hasTag := len(head) > 0
// Copy the head if needed.
//
@ -2000,10 +1995,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
hasTag = true
}
// Check if the tag is non-empty.
if len(s) == 0 {
if !hasTag {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false
@ -2085,14 +2080,14 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l
return false
}
if is_digit(parser.buffer, parser.buffer_pos) {
// Check that the intendation is greater than 0.
// Check that the indentation is greater than 0.
if parser.buffer[parser.buffer_pos] == '0' {
yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
start_mark, "found an intendation indicator equal to 0")
start_mark, "found an indentation indicator equal to 0")
return false
}
// Get the intendation level and eat the indicator.
// Get the indentation level and eat the indicator.
increment = as_digit(parser.buffer, parser.buffer_pos)
skip(parser)
}
@ -2102,7 +2097,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l
if parser.buffer[parser.buffer_pos] == '0' {
yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
start_mark, "found an intendation indicator equal to 0")
start_mark, "found an indentation indicator equal to 0")
return false
}
increment = as_digit(parser.buffer, parser.buffer_pos)
@ -2157,7 +2152,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l
end_mark := parser.mark
// Set the intendation level if it was specified.
// Set the indentation level if it was specified.
var indent int
if increment > 0 {
if parser.indent >= 0 {
@ -2217,7 +2212,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l
leading_break = read_line(parser, leading_break)
// Eat the following intendation spaces and line breaks.
// Eat the following indentation spaces and line breaks.
if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
return false
}
@ -2245,15 +2240,15 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l
return true
}
// Scan intendation spaces and line breaks for a block scalar. Determine the
// intendation level if needed.
// Scan indentation spaces and line breaks for a block scalar. Determine the
// indentation level if needed.
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
*end_mark = parser.mark
// Eat the intendation spaces and line breaks.
// Eat the indentation spaces and line breaks.
max_indent := 0
for {
// Eat the intendation spaces.
// Eat the indentation spaces.
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
@ -2267,10 +2262,10 @@ func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, br
max_indent = parser.mark.column
}
// Check for a tab character messing the intendation.
// Check for a tab character messing the indentation.
if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
start_mark, "found a tab character where an intendation space is expected")
start_mark, "found a tab character where an indentation space is expected")
}
// Have we found a non-empty line?
@ -2474,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
}
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
// Check if we are at the end of the scalar.
if single {
if parser.buffer[parser.buffer_pos] == '\'' {
@ -2486,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
// Consume blank characters.
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Consume a space or a tab character.
@ -2591,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
// Consume non-blank characters.
for !is_blankz(parser.buffer, parser.buffer_pos) {
// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
if parser.flow_level > 0 &&
parser.buffer[parser.buffer_pos] == ':' &&
!is_blankz(parser.buffer, parser.buffer_pos+1) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found unexpected ':'")
return false
}
// Check for indicators that may end a plain scalar.
if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
(parser.flow_level > 0 &&
(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
(parser.buffer[parser.buffer_pos] == ',' ||
parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
parser.buffer[parser.buffer_pos] == '}')) {
@ -2655,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Check for tab character that abuse intendation.
// Check for tab character that abuse indentation.
if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found a tab character that violate intendation")
start_mark, "found a tab character that violate indentation")
return false
}
@ -2687,7 +2673,7 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
}
}
// Check intendation level.
// Check indentation level.
if parser.flow_level == 0 && parser.mark.column < indent {
break
}

vendor/gopkg.in/yaml.v2/sorter.go (generated, vendored; 9 changed lines)

@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}

vendor/gopkg.in/yaml.v2/writerc.go (generated, vendored; 65 changed lines)

@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

vendor/gopkg.in/yaml.v2/yaml.go (generated, vendored; 134 changed lines)

@ -9,6 +9,7 @@ package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
@ -77,8 +78,65 @@ type Marshaler interface {
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
// A Decorder reads and decodes YAML values from an input stream.
type Decoder struct {
strict bool
parser *parser
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
dec.strict = strict
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder(dec.strict)
defer handleErr(&err)
d := newDecoder()
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
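Decoder wraps the reader-based parser so YAML documents can be decoded one at a time from a stream; Decode returns io.EOF once the stream is exhausted, and SetStrict opts in to the same checks as UnmarshalStrict. A usage sketch, not part of this diff:

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
	dec.SetStrict(true)
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc["a"]) // 1, then 2
	}
}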
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()
@ -114,7 +172,10 @@ func Unmarshal(in []byte, out interface{}) (err error) {
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be included if that method returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
@ -129,7 +190,7 @@ func Unmarshal(in []byte, out interface{}) (err error) {
// For example:
//
// type T struct {
// F int "a,omitempty"
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
@ -139,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
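Encoder is the writer-side counterpart: each Encode call emits one document, with "---" separating the second and later documents, and Close flushes the stream. A usage sketch, not part of this diff:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	for _, doc := range []map[string]int{{"a": 1}, {"b": 2}} {
		if err := enc.Encode(doc); err != nil {
			panic(err)
		}
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // "a: 1\n---\nb: 2\n"
}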
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
@ -200,6 +296,9 @@ type fieldInfo struct {
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
@ -222,7 +321,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
@ -279,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
@ -300,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
@ -312,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return sinfo, nil
}
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
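isZero below consults IsZeroer first, so a type can decide for itself when it counts as empty for omitempty; time.Time is the motivating implementation. A sketch with a made-up Window type, not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Window is a hypothetical type used only for this example.
type Window struct {
	Start, End int
}

// IsZero makes an all-zero Window count as empty for omitempty.
func (w Window) IsZero() bool { return w.Start == 0 && w.End == 0 }

type Settings struct {
	Name   string `yaml:"name"`
	Window Window `yaml:"window,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Settings{Name: "x"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // name: x   (window omitted because IsZero reports true)
}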
func isZero(v reflect.Value) bool {
switch v.Kind() {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:

vendor/gopkg.in/yaml.v2/yamlh.go (generated, vendored; 32 changed lines)

@ -1,6 +1,7 @@
package yaml
import (
"fmt"
"io"
)
@ -239,6 +240,27 @@ const (
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
@ -508,7 +530,7 @@ type yaml_parser_t struct {
problem string // Error description.
// The byte about which the problem occured.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
@ -521,9 +543,9 @@ type yaml_parser_t struct {
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
@ -632,7 +654,7 @@ type yaml_emitter_t struct {
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.

vendor/vendor.json (vendored; 8 changed lines)

@ -1330,10 +1330,12 @@
"revisionTime": "2016-09-30T00:14:02Z"
},
{
"checksumSHA1": "KgT+peLCcuh0/m2mpoOZXuxXmwc=",
"checksumSHA1": "HmUAWd1Jm25cGH4Pyf3Zgp2RhkI=",
"path": "gopkg.in/yaml.v2",
"revision": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40",
"revisionTime": "2015-06-24T11:29:02+01:00"
"revision": "86f5ed62f8a0ee96bd888d2efdfd6d4fb100a4eb",
"revisionTime": "2018-03-26T05:07:29Z",
"version": "v2.2.0",
"versionExact": "v2.2.0"
},
{
"checksumSHA1": "yugSOlAEbiAH8bhs5RLNAAmR3Cc=",