mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-10 07:34:04 -08:00
Merge pull request #708 from prometheus/fabxc/servdisc
Service discovery and more
This commit is contained in:
commit
8c1c840429
12
Godeps/Godeps.json
generated
12
Godeps/Godeps.json
generated
|
@ -19,6 +19,10 @@
|
||||||
"ImportPath": "github.com/golang/protobuf/proto",
|
"ImportPath": "github.com/golang/protobuf/proto",
|
||||||
"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
|
"Rev": "655cdfa588ea190e901bc5590e65d5621688847c"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/hashicorp/consul/api",
|
||||||
|
"Rev": "9fb235a98d8e88f7857b21bb2dd3efc428c01427",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||||
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
|
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
|
||||||
|
@ -67,6 +71,14 @@
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/context",
|
"ImportPath": "golang.org/x/net/context",
|
||||||
"Rev": "b6fdb7d8a4ccefede406f8fe0f017fb58265054c"
|
"Rev": "b6fdb7d8a4ccefede406f8fe0f017fb58265054c"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "gopkg.in/fsnotify.v1",
|
||||||
|
"Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "gopkg.in/yaml.v2",
|
||||||
|
"Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
39
Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
generated
vendored
Normal file
39
Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
Consul API client
|
||||||
|
=================
|
||||||
|
|
||||||
|
This package provides the `api` package which attempts to
|
||||||
|
provide programmatic access to the full Consul API.
|
||||||
|
|
||||||
|
Currently, all of the Consul APIs included in version 0.3 are supported.
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
=============
|
||||||
|
|
||||||
|
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)
|
||||||
|
|
||||||
|
Usage
|
||||||
|
=====
|
||||||
|
|
||||||
|
Below is an example of using the Consul client:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Get a new client, with KV endpoints
|
||||||
|
client, _ := api.NewClient(api.DefaultConfig())
|
||||||
|
kv := client.KV()
|
||||||
|
|
||||||
|
// PUT a new KV pair
|
||||||
|
p := &api.KVPair{Key: "foo", Value: []byte("test")}
|
||||||
|
_, err := kv.Put(p, nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup the pair
|
||||||
|
pair, _, err := kv.Get("foo", nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("KV: %v", pair)
|
||||||
|
|
||||||
|
```
|
||||||
|
|
140
Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go
generated
vendored
Normal file
140
Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ACLCLientType is the client type token
|
||||||
|
ACLClientType = "client"
|
||||||
|
|
||||||
|
// ACLManagementType is the management type token
|
||||||
|
ACLManagementType = "management"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ACLEntry is used to represent an ACL entry
|
||||||
|
type ACLEntry struct {
|
||||||
|
CreateIndex uint64
|
||||||
|
ModifyIndex uint64
|
||||||
|
ID string
|
||||||
|
Name string
|
||||||
|
Type string
|
||||||
|
Rules string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ACL can be used to query the ACL endpoints
|
||||||
|
type ACL struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// ACL returns a handle to the ACL endpoints
|
||||||
|
func (c *Client) ACL() *ACL {
|
||||||
|
return &ACL{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create is used to generate a new token with the given parameters
|
||||||
|
func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/create")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = acl
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update is used to update the rules of an existing token
|
||||||
|
func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/update")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = acl
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy is used to destroy a given ACL token ID
|
||||||
|
func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone is used to return a new token cloned from an existing one
|
||||||
|
func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
var out struct{ ID string }
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info is used to query for information about an ACL token
|
||||||
|
func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/acl/info/"+id)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*ACLEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], qm, nil
|
||||||
|
}
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List is used to get all the ACL tokens
|
||||||
|
func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/acl/list")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var entries []*ACLEntry
|
||||||
|
if err := decodeBody(resp, &entries); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
152
Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
generated
vendored
Normal file
152
Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
generated
vendored
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ROOT is a management token for the tests
|
||||||
|
var CONSUL_ROOT string
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestACL_CreateDestroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if CONSUL_ROOT == "" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
c.config.Token = CONSUL_ROOT
|
||||||
|
acl := c.ACL()
|
||||||
|
|
||||||
|
ae := ACLEntry{
|
||||||
|
Name: "API test",
|
||||||
|
Type: ACLClientType,
|
||||||
|
Rules: `key "" { policy = "deny" }`,
|
||||||
|
}
|
||||||
|
|
||||||
|
id, wm, err := acl.Create(&ae, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if wm.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", wm)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == "" {
|
||||||
|
t.Fatalf("invalid: %v", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
ae2, _, err := acl.Info(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
|
||||||
|
t.Fatalf("Bad: %#v", ae2)
|
||||||
|
}
|
||||||
|
|
||||||
|
wm, err = acl.Destroy(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if wm.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", wm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestACL_CloneDestroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if CONSUL_ROOT == "" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
c.config.Token = CONSUL_ROOT
|
||||||
|
acl := c.ACL()
|
||||||
|
|
||||||
|
id, wm, err := acl.Clone(CONSUL_ROOT, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if wm.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", wm)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == "" {
|
||||||
|
t.Fatalf("invalid: %v", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
wm, err = acl.Destroy(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if wm.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", wm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestACL_Info(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if CONSUL_ROOT == "" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
c.config.Token = CONSUL_ROOT
|
||||||
|
acl := c.ACL()
|
||||||
|
|
||||||
|
ae, qm, err := acl.Info(CONSUL_ROOT, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
|
||||||
|
t.Fatalf("bad: %#v", ae)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestACL_List(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if CONSUL_ROOT == "" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
c.config.Token = CONSUL_ROOT
|
||||||
|
acl := c.ACL()
|
||||||
|
|
||||||
|
acls, qm, err := acl.List(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(acls) < 2 {
|
||||||
|
t.Fatalf("bad: %v", acls)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
}
|
334
Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go
generated
vendored
Normal file
334
Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go
generated
vendored
Normal file
|
@ -0,0 +1,334 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AgentCheck represents a check known to the agent
|
||||||
|
type AgentCheck struct {
|
||||||
|
Node string
|
||||||
|
CheckID string
|
||||||
|
Name string
|
||||||
|
Status string
|
||||||
|
Notes string
|
||||||
|
Output string
|
||||||
|
ServiceID string
|
||||||
|
ServiceName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentService represents a service known to the agent
|
||||||
|
type AgentService struct {
|
||||||
|
ID string
|
||||||
|
Service string
|
||||||
|
Tags []string
|
||||||
|
Port int
|
||||||
|
Address string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentMember represents a cluster member known to the agent
|
||||||
|
type AgentMember struct {
|
||||||
|
Name string
|
||||||
|
Addr string
|
||||||
|
Port uint16
|
||||||
|
Tags map[string]string
|
||||||
|
Status int
|
||||||
|
ProtocolMin uint8
|
||||||
|
ProtocolMax uint8
|
||||||
|
ProtocolCur uint8
|
||||||
|
DelegateMin uint8
|
||||||
|
DelegateMax uint8
|
||||||
|
DelegateCur uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentServiceRegistration is used to register a new service
|
||||||
|
type AgentServiceRegistration struct {
|
||||||
|
ID string `json:",omitempty"`
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Tags []string `json:",omitempty"`
|
||||||
|
Port int `json:",omitempty"`
|
||||||
|
Address string `json:",omitempty"`
|
||||||
|
Check *AgentServiceCheck
|
||||||
|
Checks AgentServiceChecks
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentCheckRegistration is used to register a new check
|
||||||
|
type AgentCheckRegistration struct {
|
||||||
|
ID string `json:",omitempty"`
|
||||||
|
Name string `json:",omitempty"`
|
||||||
|
Notes string `json:",omitempty"`
|
||||||
|
ServiceID string `json:",omitempty"`
|
||||||
|
AgentServiceCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentServiceCheck is used to create an associated
|
||||||
|
// check for a service
|
||||||
|
type AgentServiceCheck struct {
|
||||||
|
Script string `json:",omitempty"`
|
||||||
|
Interval string `json:",omitempty"`
|
||||||
|
Timeout string `json:",omitempty"`
|
||||||
|
TTL string `json:",omitempty"`
|
||||||
|
HTTP string `json:",omitempty"`
|
||||||
|
Status string `json:",omitempty"`
|
||||||
|
}
|
||||||
|
type AgentServiceChecks []*AgentServiceCheck
|
||||||
|
|
||||||
|
// Agent can be used to query the Agent endpoints
|
||||||
|
type Agent struct {
|
||||||
|
c *Client
|
||||||
|
|
||||||
|
// cache the node name
|
||||||
|
nodeName string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Agent returns a handle to the agent endpoints
|
||||||
|
func (c *Client) Agent() *Agent {
|
||||||
|
return &Agent{c: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Self is used to query the agent we are speaking to for
|
||||||
|
// information about itself
|
||||||
|
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/self")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]map[string]interface{}
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeName is used to get the node name of the agent
|
||||||
|
func (a *Agent) NodeName() (string, error) {
|
||||||
|
if a.nodeName != "" {
|
||||||
|
return a.nodeName, nil
|
||||||
|
}
|
||||||
|
info, err := a.Self()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
name := info["Config"]["NodeName"].(string)
|
||||||
|
a.nodeName = name
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks returns the locally registered checks
|
||||||
|
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/checks")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]*AgentCheck
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Services returns the locally registered services
|
||||||
|
func (a *Agent) Services() (map[string]*AgentService, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/services")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]*AgentService
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Members returns the known gossip members. The WAN
|
||||||
|
// flag can be used to query a server for WAN members.
|
||||||
|
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/members")
|
||||||
|
if wan {
|
||||||
|
r.params.Set("wan", "1")
|
||||||
|
}
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out []*AgentMember
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceRegister is used to register a new service with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/register")
|
||||||
|
r.obj = service
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceDeregister is used to deregister a service with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) ServiceDeregister(serviceID string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PassTTL is used to set a TTL check to the passing state
|
||||||
|
func (a *Agent) PassTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "pass")
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarnTTL is used to set a TTL check to the warning state
|
||||||
|
func (a *Agent) WarnTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "warn")
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailTTL is used to set a TTL check to the failing state
|
||||||
|
func (a *Agent) FailTTL(checkID, note string) error {
|
||||||
|
return a.UpdateTTL(checkID, note, "fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateTTL is used to update the TTL of a check
|
||||||
|
func (a *Agent) UpdateTTL(checkID, note, status string) error {
|
||||||
|
switch status {
|
||||||
|
case "pass":
|
||||||
|
case "warn":
|
||||||
|
case "fail":
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Invalid status: %s", status)
|
||||||
|
}
|
||||||
|
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
|
||||||
|
r := a.c.newRequest("PUT", endpoint)
|
||||||
|
r.params.Set("note", note)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckRegister is used to register a new check with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/check/register")
|
||||||
|
r.obj = check
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckDeregister is used to deregister a check with
|
||||||
|
// the local agent
|
||||||
|
func (a *Agent) CheckDeregister(checkID string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join is used to instruct the agent to attempt a join to
|
||||||
|
// another cluster member
|
||||||
|
func (a *Agent) Join(addr string, wan bool) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
|
||||||
|
if wan {
|
||||||
|
r.params.Set("wan", "1")
|
||||||
|
}
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForceLeave is used to have the agent eject a failed node
|
||||||
|
func (a *Agent) ForceLeave(node string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableServiceMaintenance toggles service maintenance mode on
|
||||||
|
// for the given service ID.
|
||||||
|
func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
|
||||||
|
r.params.Set("enable", "true")
|
||||||
|
r.params.Set("reason", reason)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisableServiceMaintenance toggles service maintenance mode off
|
||||||
|
// for the given service ID.
|
||||||
|
func (a *Agent) DisableServiceMaintenance(serviceID string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
|
||||||
|
r.params.Set("enable", "false")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableNodeMaintenance toggles node maintenance mode on for the
|
||||||
|
// agent we are connected to.
|
||||||
|
func (a *Agent) EnableNodeMaintenance(reason string) error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/maintenance")
|
||||||
|
r.params.Set("enable", "true")
|
||||||
|
r.params.Set("reason", reason)
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisableNodeMaintenance toggles node maintenance mode off for the
|
||||||
|
// agent we are connected to.
|
||||||
|
func (a *Agent) DisableNodeMaintenance() error {
|
||||||
|
r := a.c.newRequest("PUT", "/v1/agent/maintenance")
|
||||||
|
r.params.Set("enable", "false")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
524
Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
generated
vendored
Normal file
524
Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
generated
vendored
Normal file
|
@ -0,0 +1,524 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAgent_Self(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
info, err := agent.Self()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
name := info["Config"]["NodeName"]
|
||||||
|
if name == "" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Members(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
members, err := agent.Members(false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(members) != 1 {
|
||||||
|
t.Fatalf("bad: %v", members)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Services(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg := &AgentServiceRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
Tags: []string{"bar", "baz"},
|
||||||
|
Port: 8000,
|
||||||
|
Check: &AgentServiceCheck{
|
||||||
|
TTL: "15s",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
services, err := agent.Services()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if _, ok := services["foo"]; !ok {
|
||||||
|
t.Fatalf("missing service: %v", services)
|
||||||
|
}
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
chk, ok := checks["service:foo"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks should default to critical
|
||||||
|
if chk.Status != "critical" {
|
||||||
|
t.Fatalf("Bad: %#v", chk)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.ServiceDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Services_CheckPassing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
reg := &AgentServiceRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
Tags: []string{"bar", "baz"},
|
||||||
|
Port: 8000,
|
||||||
|
Check: &AgentServiceCheck{
|
||||||
|
TTL: "15s",
|
||||||
|
Status: "passing",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
services, err := agent.Services()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if _, ok := services["foo"]; !ok {
|
||||||
|
t.Fatalf("missing service: %v", services)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
chk, ok := checks["service:foo"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
|
||||||
|
if chk.Status != "passing" {
|
||||||
|
t.Fatalf("Bad: %#v", chk)
|
||||||
|
}
|
||||||
|
if err := agent.ServiceDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Services_CheckBadStatus(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
reg := &AgentServiceRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
Tags: []string{"bar", "baz"},
|
||||||
|
Port: 8000,
|
||||||
|
Check: &AgentServiceCheck{
|
||||||
|
TTL: "15s",
|
||||||
|
Status: "fluffy",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg); err == nil {
|
||||||
|
t.Fatalf("bad status accepted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_ServiceAddress(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg1 := &AgentServiceRegistration{
|
||||||
|
Name: "foo1",
|
||||||
|
Port: 8000,
|
||||||
|
Address: "192.168.0.42",
|
||||||
|
}
|
||||||
|
reg2 := &AgentServiceRegistration{
|
||||||
|
Name: "foo2",
|
||||||
|
Port: 8000,
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg1); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg2); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
services, err := agent.Services()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := services["foo1"]; !ok {
|
||||||
|
t.Fatalf("missing service: %v", services)
|
||||||
|
}
|
||||||
|
if _, ok := services["foo2"]; !ok {
|
||||||
|
t.Fatalf("missing service: %v", services)
|
||||||
|
}
|
||||||
|
|
||||||
|
if services["foo1"].Address != "192.168.0.42" {
|
||||||
|
t.Fatalf("missing Address field in service foo1: %v", services)
|
||||||
|
}
|
||||||
|
if services["foo2"].Address != "" {
|
||||||
|
t.Fatalf("missing Address field in service foo2: %v", services)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.ServiceDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Services_MultipleChecks(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg := &AgentServiceRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
Tags: []string{"bar", "baz"},
|
||||||
|
Port: 8000,
|
||||||
|
Checks: AgentServiceChecks{
|
||||||
|
&AgentServiceCheck{
|
||||||
|
TTL: "15s",
|
||||||
|
},
|
||||||
|
&AgentServiceCheck{
|
||||||
|
TTL: "30s",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
services, err := agent.Services()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if _, ok := services["foo"]; !ok {
|
||||||
|
t.Fatalf("missing service: %v", services)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if _, ok := checks["service:foo:1"]; !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
if _, ok := checks["service:foo:2"]; !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_SetTTLStatus(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg := &AgentServiceRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
Check: &AgentServiceCheck{
|
||||||
|
TTL: "15s",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.WarnTTL("service:foo", "test"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
chk, ok := checks["service:foo"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
if chk.Status != "warning" {
|
||||||
|
t.Fatalf("Bad: %#v", chk)
|
||||||
|
}
|
||||||
|
if chk.Output != "test" {
|
||||||
|
t.Fatalf("Bad: %#v", chk)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.ServiceDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Checks(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg := &AgentCheckRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
}
|
||||||
|
reg.TTL = "15s"
|
||||||
|
if err := agent.CheckRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
chk, ok := checks["foo"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
if chk.Status != "critical" {
|
||||||
|
t.Fatalf("check not critical: %v", chk)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.CheckDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_CheckStartPassing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
reg := &AgentCheckRegistration{
|
||||||
|
Name: "foo",
|
||||||
|
AgentServiceCheck: AgentServiceCheck{
|
||||||
|
Status: "passing",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
reg.TTL = "15s"
|
||||||
|
if err := agent.CheckRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
chk, ok := checks["foo"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
if chk.Status != "passing" {
|
||||||
|
t.Fatalf("check not passing: %v", chk)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := agent.CheckDeregister("foo"); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Checks_serviceBound(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
// First register a service
|
||||||
|
serviceReg := &AgentServiceRegistration{
|
||||||
|
Name: "redis",
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(serviceReg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register a check bound to the service
|
||||||
|
reg := &AgentCheckRegistration{
|
||||||
|
Name: "redischeck",
|
||||||
|
ServiceID: "redis",
|
||||||
|
}
|
||||||
|
reg.TTL = "15s"
|
||||||
|
if err := agent.CheckRegister(reg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
check, ok := checks["redischeck"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing check: %v", checks)
|
||||||
|
}
|
||||||
|
if check.ServiceID != "redis" {
|
||||||
|
t.Fatalf("missing service association for check: %v", check)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_Join(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
info, err := agent.Self()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Join ourself
|
||||||
|
addr := info["Config"]["AdvertiseAddr"].(string)
|
||||||
|
err = agent.Join(addr, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_ForceLeave(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
// Eject somebody
|
||||||
|
err := agent.ForceLeave("foo")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServiceMaintenance(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
// First register a service
|
||||||
|
serviceReg := &AgentServiceRegistration{
|
||||||
|
Name: "redis",
|
||||||
|
}
|
||||||
|
if err := agent.ServiceRegister(serviceReg); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable maintenance mode
|
||||||
|
if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure a critical check was added
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
found := false
|
||||||
|
for _, check := range checks {
|
||||||
|
if strings.Contains(check.CheckID, "maintenance") {
|
||||||
|
found = true
|
||||||
|
if check.Status != "critical" || check.Notes != "broken" {
|
||||||
|
t.Fatalf("bad: %#v", checks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
t.Fatalf("bad: %#v", checks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable maintenance mode
|
||||||
|
if err := agent.DisableServiceMaintenance("redis"); err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the critical health check was removed
|
||||||
|
checks, err = agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
for _, check := range checks {
|
||||||
|
if strings.Contains(check.CheckID, "maintenance") {
|
||||||
|
t.Fatalf("should have removed health check")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNodeMaintenance(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
// Enable maintenance mode
|
||||||
|
if err := agent.EnableNodeMaintenance("broken"); err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that a critical check was added
|
||||||
|
checks, err := agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
found := false
|
||||||
|
for _, check := range checks {
|
||||||
|
if strings.Contains(check.CheckID, "maintenance") {
|
||||||
|
found = true
|
||||||
|
if check.Status != "critical" || check.Notes != "broken" {
|
||||||
|
t.Fatalf("bad: %#v", checks)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
t.Fatalf("bad: %#v", checks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable maintenance mode
|
||||||
|
if err := agent.DisableNodeMaintenance(); err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the check was removed
|
||||||
|
checks, err = agent.Checks()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
for _, check := range checks {
|
||||||
|
if strings.Contains(check.CheckID, "maintenance") {
|
||||||
|
t.Fatalf("should have removed health check")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
442
Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
generated
vendored
Normal file
442
Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
generated
vendored
Normal file
|
@ -0,0 +1,442 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// QueryOptions are used to parameterize a query.
type QueryOptions struct {
	// Providing a datacenter overwrites the DC provided
	// by the Config.
	Datacenter string

	// AllowStale allows any Consul server (non-leader) to service
	// a read. This allows for lower latency and higher throughput.
	AllowStale bool

	// RequireConsistent forces the read to be fully consistent.
	// This is more expensive but prevents ever performing a stale
	// read.
	RequireConsistent bool

	// WaitIndex is used to enable a blocking query. Waits
	// until the timeout or the next index is reached.
	WaitIndex uint64

	// WaitTime is used to bound the duration of a wait.
	// Defaults to that of the Config, but can be overridden.
	WaitTime time.Duration

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}
|
||||||
|
|
||||||
|
// WriteOptions are used to parameterize a write.
type WriteOptions struct {
	// Providing a datacenter overwrites the DC provided
	// by the Config.
	Datacenter string

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}
|
||||||
|
|
||||||
|
// QueryMeta is used to return meta data about a query.
type QueryMeta struct {
	// LastIndex. This can be used as a WaitIndex to perform
	// a blocking query.
	LastIndex uint64

	// Time of last contact from the leader for the
	// server servicing the request.
	LastContact time.Duration

	// Is there a known leader.
	KnownLeader bool

	// How long did the request take.
	RequestTime time.Duration
}
|
||||||
|
|
||||||
|
// WriteMeta is used to return meta data about a write.
type WriteMeta struct {
	// How long did the request take.
	RequestTime time.Duration
}
|
||||||
|
|
||||||
|
// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication.
type HttpBasicAuth struct {
	// Username to use for HTTP Basic Authentication.
	Username string

	// Password to use for HTTP Basic Authentication.
	Password string
}
|
||||||
|
|
||||||
|
// Config is used to configure the creation of a client.
type Config struct {
	// Address is the address of the Consul server.
	Address string

	// Scheme is the URI scheme for the Consul server.
	Scheme string

	// Datacenter to use. If not provided, the default agent datacenter is used.
	Datacenter string

	// HttpClient is the client to use. Default will be
	// used if not provided.
	HttpClient *http.Client

	// HttpAuth is the auth info to use for http access.
	HttpAuth *HttpBasicAuth

	// WaitTime limits how long a Watch will block. If not provided,
	// the agent default values will be used.
	WaitTime time.Duration

	// Token is used to provide a per-request ACL token
	// which overrides the agent's default token.
	Token string
}
|
||||||
|
|
||||||
|
// DefaultConfig returns a default configuration for the client
|
||||||
|
func DefaultConfig() *Config {
|
||||||
|
config := &Config{
|
||||||
|
Address: "127.0.0.1:8500",
|
||||||
|
Scheme: "http",
|
||||||
|
HttpClient: http.DefaultClient,
|
||||||
|
}
|
||||||
|
|
||||||
|
if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
|
||||||
|
config.Address = addr
|
||||||
|
}
|
||||||
|
|
||||||
|
if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" {
|
||||||
|
config.Token = token
|
||||||
|
}
|
||||||
|
|
||||||
|
if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" {
|
||||||
|
var username, password string
|
||||||
|
if strings.Contains(auth, ":") {
|
||||||
|
split := strings.SplitN(auth, ":", 2)
|
||||||
|
username = split[0]
|
||||||
|
password = split[1]
|
||||||
|
} else {
|
||||||
|
username = auth
|
||||||
|
}
|
||||||
|
|
||||||
|
config.HttpAuth = &HttpBasicAuth{
|
||||||
|
Username: username,
|
||||||
|
Password: password,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" {
|
||||||
|
enabled, err := strconv.ParseBool(ssl)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if enabled {
|
||||||
|
config.Scheme = "https"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" {
|
||||||
|
doVerify, err := strconv.ParseBool(verify)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !doVerify {
|
||||||
|
config.HttpClient.Transport = &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client provides a client to the Consul API.
type Client struct {
	// config holds the (copied) configuration the client was created with.
	config Config
}
|
||||||
|
|
||||||
|
// NewClient returns a new client
|
||||||
|
func NewClient(config *Config) (*Client, error) {
|
||||||
|
// bootstrap the config
|
||||||
|
defConfig := DefaultConfig()
|
||||||
|
|
||||||
|
if len(config.Address) == 0 {
|
||||||
|
config.Address = defConfig.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(config.Scheme) == 0 {
|
||||||
|
config.Scheme = defConfig.Scheme
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.HttpClient == nil {
|
||||||
|
config.HttpClient = defConfig.HttpClient
|
||||||
|
}
|
||||||
|
|
||||||
|
if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
|
||||||
|
config.HttpClient = &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: func(_, _ string) (net.Conn, error) {
|
||||||
|
return net.Dial("unix", parts[1])
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Address = parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &Client{
|
||||||
|
config: *config,
|
||||||
|
}
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// request is used to help build up a request.
type request struct {
	config *Config    // client configuration (auth, defaults)
	method string     // HTTP method, e.g. "GET" or "PUT"
	url    *url.URL   // scheme/host/path of the target endpoint
	params url.Values // query-string parameters
	body   io.Reader  // explicit request body, if any
	obj    interface{} // object to JSON-encode as the body when body is nil
}
|
||||||
|
|
||||||
|
// setQueryOptions is used to annotate the request with
|
||||||
|
// additional query options
|
||||||
|
func (r *request) setQueryOptions(q *QueryOptions) {
|
||||||
|
if q == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if q.Datacenter != "" {
|
||||||
|
r.params.Set("dc", q.Datacenter)
|
||||||
|
}
|
||||||
|
if q.AllowStale {
|
||||||
|
r.params.Set("stale", "")
|
||||||
|
}
|
||||||
|
if q.RequireConsistent {
|
||||||
|
r.params.Set("consistent", "")
|
||||||
|
}
|
||||||
|
if q.WaitIndex != 0 {
|
||||||
|
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
|
||||||
|
}
|
||||||
|
if q.WaitTime != 0 {
|
||||||
|
r.params.Set("wait", durToMsec(q.WaitTime))
|
||||||
|
}
|
||||||
|
if q.Token != "" {
|
||||||
|
r.params.Set("token", q.Token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// durToMsec converts a duration to a millisecond specified string
|
||||||
|
func durToMsec(dur time.Duration) string {
|
||||||
|
return fmt.Sprintf("%dms", dur/time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setWriteOptions is used to annotate the request with
|
||||||
|
// additional write options
|
||||||
|
func (r *request) setWriteOptions(q *WriteOptions) {
|
||||||
|
if q == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if q.Datacenter != "" {
|
||||||
|
r.params.Set("dc", q.Datacenter)
|
||||||
|
}
|
||||||
|
if q.Token != "" {
|
||||||
|
r.params.Set("token", q.Token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// toHTTP converts the request to an HTTP request
|
||||||
|
func (r *request) toHTTP() (*http.Request, error) {
|
||||||
|
// Encode the query parameters
|
||||||
|
r.url.RawQuery = r.params.Encode()
|
||||||
|
|
||||||
|
// Check if we should encode the body
|
||||||
|
if r.body == nil && r.obj != nil {
|
||||||
|
if b, err := encodeBody(r.obj); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
r.body = b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the HTTP request
|
||||||
|
req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.URL.Host = r.url.Host
|
||||||
|
req.URL.Scheme = r.url.Scheme
|
||||||
|
req.Host = r.url.Host
|
||||||
|
|
||||||
|
// Setup auth
|
||||||
|
if r.config.HttpAuth != nil {
|
||||||
|
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
|
||||||
|
}
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newRequest is used to create a new request
|
||||||
|
func (c *Client) newRequest(method, path string) *request {
|
||||||
|
r := &request{
|
||||||
|
config: &c.config,
|
||||||
|
method: method,
|
||||||
|
url: &url.URL{
|
||||||
|
Scheme: c.config.Scheme,
|
||||||
|
Host: c.config.Address,
|
||||||
|
Path: path,
|
||||||
|
},
|
||||||
|
params: make(map[string][]string),
|
||||||
|
}
|
||||||
|
if c.config.Datacenter != "" {
|
||||||
|
r.params.Set("dc", c.config.Datacenter)
|
||||||
|
}
|
||||||
|
if c.config.WaitTime != 0 {
|
||||||
|
r.params.Set("wait", durToMsec(r.config.WaitTime))
|
||||||
|
}
|
||||||
|
if c.config.Token != "" {
|
||||||
|
r.params.Set("token", r.config.Token)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// doRequest runs a request with our client
|
||||||
|
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
|
||||||
|
req, err := r.toHTTP()
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, err
|
||||||
|
}
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := c.config.HttpClient.Do(req)
|
||||||
|
diff := time.Now().Sub(start)
|
||||||
|
return diff, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query is used to do a GET request against an endpoint
|
||||||
|
// and deserialize the response into an interface using
|
||||||
|
// standard Consul conventions.
|
||||||
|
func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
|
||||||
|
r := c.newRequest("GET", endpoint)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
if err := decodeBody(resp, out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// write is used to do a PUT request against an endpoint
|
||||||
|
// and serialize/deserialized using the standard Consul conventions.
|
||||||
|
func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := c.newRequest("PUT", endpoint)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = in
|
||||||
|
rtt, resp, err := requireOK(c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{RequestTime: rtt}
|
||||||
|
if out != nil {
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseQueryMeta is used to help parse query meta-data
|
||||||
|
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
|
||||||
|
header := resp.Header
|
||||||
|
|
||||||
|
// Parse the X-Consul-Index
|
||||||
|
index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
|
||||||
|
}
|
||||||
|
q.LastIndex = index
|
||||||
|
|
||||||
|
// Parse the X-Consul-LastContact
|
||||||
|
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
|
||||||
|
}
|
||||||
|
q.LastContact = time.Duration(last) * time.Millisecond
|
||||||
|
|
||||||
|
// Parse the X-Consul-KnownLeader
|
||||||
|
switch header.Get("X-Consul-KnownLeader") {
|
||||||
|
case "true":
|
||||||
|
q.KnownLeader = true
|
||||||
|
default:
|
||||||
|
q.KnownLeader = false
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeBody is used to JSON decode a body
|
||||||
|
func decodeBody(resp *http.Response, out interface{}) error {
|
||||||
|
dec := json.NewDecoder(resp.Body)
|
||||||
|
return dec.Decode(out)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBody is used to encode a request body
|
||||||
|
func encodeBody(obj interface{}) (io.Reader, error) {
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
enc := json.NewEncoder(buf)
|
||||||
|
if err := enc.Encode(obj); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// requireOK is used to wrap doRequest and check for a 200
|
||||||
|
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
|
||||||
|
if e != nil {
|
||||||
|
if resp != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
return d, nil, e
|
||||||
|
}
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
io.Copy(&buf, resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
|
||||||
|
}
|
||||||
|
return d, resp, nil
|
||||||
|
}
|
242
Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
generated
vendored
Normal file
242
Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
crand "crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// configCallback mutates a client Config before the test client is created.
type configCallback func(c *Config)
|
||||||
|
|
||||||
|
// makeClient starts a default test Consul server and returns a client
// pointed at it. Callers must Stop() the returned server.
func makeClient(t *testing.T) (*Client, *testutil.TestServer) {
	return makeClientWithConfig(t, nil, nil)
}
|
||||||
|
|
||||||
|
func makeClientWithConfig(
|
||||||
|
t *testing.T,
|
||||||
|
cb1 configCallback,
|
||||||
|
cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) {
|
||||||
|
|
||||||
|
// Make client config
|
||||||
|
conf := DefaultConfig()
|
||||||
|
if cb1 != nil {
|
||||||
|
cb1(conf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create server
|
||||||
|
server := testutil.NewTestServerConfig(t, cb2)
|
||||||
|
conf.Address = server.HTTPAddr
|
||||||
|
|
||||||
|
// Create client
|
||||||
|
client, err := NewClient(conf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, server
|
||||||
|
}
|
||||||
|
|
||||||
|
func testKey() string {
|
||||||
|
buf := make([]byte, 16)
|
||||||
|
if _, err := crand.Read(buf); err != nil {
|
||||||
|
panic(fmt.Errorf("Failed to read random bytes: %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
|
||||||
|
buf[0:4],
|
||||||
|
buf[4:6],
|
||||||
|
buf[6:8],
|
||||||
|
buf[8:10],
|
||||||
|
buf[10:16])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDefaultConfig_env(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
addr := "1.2.3.4:5678"
|
||||||
|
token := "abcd1234"
|
||||||
|
auth := "username:password"
|
||||||
|
|
||||||
|
os.Setenv("CONSUL_HTTP_ADDR", addr)
|
||||||
|
defer os.Setenv("CONSUL_HTTP_ADDR", "")
|
||||||
|
os.Setenv("CONSUL_HTTP_TOKEN", token)
|
||||||
|
defer os.Setenv("CONSUL_HTTP_TOKEN", "")
|
||||||
|
os.Setenv("CONSUL_HTTP_AUTH", auth)
|
||||||
|
defer os.Setenv("CONSUL_HTTP_AUTH", "")
|
||||||
|
os.Setenv("CONSUL_HTTP_SSL", "1")
|
||||||
|
defer os.Setenv("CONSUL_HTTP_SSL", "")
|
||||||
|
os.Setenv("CONSUL_HTTP_SSL_VERIFY", "0")
|
||||||
|
defer os.Setenv("CONSUL_HTTP_SSL_VERIFY", "")
|
||||||
|
|
||||||
|
config := DefaultConfig()
|
||||||
|
|
||||||
|
if config.Address != addr {
|
||||||
|
t.Errorf("expected %q to be %q", config.Address, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Token != token {
|
||||||
|
t.Errorf("expected %q to be %q", config.Token, token)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.HttpAuth == nil {
|
||||||
|
t.Fatalf("expected HttpAuth to be enabled")
|
||||||
|
}
|
||||||
|
if config.HttpAuth.Username != "username" {
|
||||||
|
t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username")
|
||||||
|
}
|
||||||
|
if config.HttpAuth.Password != "password" {
|
||||||
|
t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Scheme != "https" {
|
||||||
|
t.Errorf("expected %q to be %q", config.Scheme, "https")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
|
||||||
|
t.Errorf("expected SSL verification to be off")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSetQueryOptions(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
r := c.newRequest("GET", "/v1/kv/foo")
|
||||||
|
q := &QueryOptions{
|
||||||
|
Datacenter: "foo",
|
||||||
|
AllowStale: true,
|
||||||
|
RequireConsistent: true,
|
||||||
|
WaitIndex: 1000,
|
||||||
|
WaitTime: 100 * time.Second,
|
||||||
|
Token: "12345",
|
||||||
|
}
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
|
||||||
|
if r.params.Get("dc") != "foo" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if _, ok := r.params["stale"]; !ok {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if _, ok := r.params["consistent"]; !ok {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if r.params.Get("index") != "1000" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if r.params.Get("wait") != "100000ms" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if r.params.Get("token") != "12345" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSetWriteOptions(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
r := c.newRequest("GET", "/v1/kv/foo")
|
||||||
|
q := &WriteOptions{
|
||||||
|
Datacenter: "foo",
|
||||||
|
Token: "23456",
|
||||||
|
}
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
|
||||||
|
if r.params.Get("dc") != "foo" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
if r.params.Get("token") != "23456" {
|
||||||
|
t.Fatalf("bad: %v", r.params)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRequestToHTTP(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
r := c.newRequest("DELETE", "/v1/kv/foo")
|
||||||
|
q := &QueryOptions{
|
||||||
|
Datacenter: "foo",
|
||||||
|
}
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
req, err := r.toHTTP()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Method != "DELETE" {
|
||||||
|
t.Fatalf("bad: %v", req)
|
||||||
|
}
|
||||||
|
if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" {
|
||||||
|
t.Fatalf("bad: %v", req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseQueryMeta(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
resp := &http.Response{
|
||||||
|
Header: make(map[string][]string),
|
||||||
|
}
|
||||||
|
resp.Header.Set("X-Consul-Index", "12345")
|
||||||
|
resp.Header.Set("X-Consul-LastContact", "80")
|
||||||
|
resp.Header.Set("X-Consul-KnownLeader", "true")
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
if err := parseQueryMeta(resp, qm); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex != 12345 {
|
||||||
|
t.Fatalf("Bad: %v", qm)
|
||||||
|
}
|
||||||
|
if qm.LastContact != 80*time.Millisecond {
|
||||||
|
t.Fatalf("Bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("Bad: %v", qm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAPI_UnixSocket(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
|
||||||
|
tempDir, err := ioutil.TempDir("", "consul")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tempDir)
|
||||||
|
socket := filepath.Join(tempDir, "test.sock")
|
||||||
|
|
||||||
|
c, s := makeClientWithConfig(t, func(c *Config) {
|
||||||
|
c.Address = "unix://" + socket
|
||||||
|
}, func(c *testutil.TestServerConfig) {
|
||||||
|
c.Addresses = &testutil.TestAddressConfig{
|
||||||
|
HTTP: "unix://" + socket,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
|
||||||
|
info, err := agent.Self()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %s", err)
|
||||||
|
}
|
||||||
|
if info["Config"]["NodeName"] == "" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
}
|
182
Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go
generated
vendored
Normal file
182
Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go
generated
vendored
Normal file
|
@ -0,0 +1,182 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// Node is a member of the Consul catalog.
type Node struct {
	Node    string // node name
	Address string // node network address
}

// CatalogService pairs a node with one service instance registered on it.
type CatalogService struct {
	Node           string
	Address        string
	ServiceID      string
	ServiceName    string
	ServiceAddress string
	ServiceTags    []string
	ServicePort    int
}

// CatalogNode is a node together with the services registered on it.
type CatalogNode struct {
	Node     *Node
	Services map[string]*AgentService // presumably keyed by service ID — verify against callers
}

// CatalogRegistration is the request body for the catalog register endpoint.
type CatalogRegistration struct {
	Node       string
	Address    string
	Datacenter string
	Service    *AgentService
	Check      *AgentCheck
}

// CatalogDeregistration is the request body for the catalog deregister endpoint.
type CatalogDeregistration struct {
	Node       string
	Address    string
	Datacenter string
	ServiceID  string
	CheckID    string
}
|
||||||
|
|
||||||
|
// Catalog can be used to query the Catalog endpoints.
type Catalog struct {
	c *Client // underlying API client used for all requests
}

// Catalog returns a handle to the catalog endpoints.
func (c *Client) Catalog() *Catalog {
	return &Catalog{c}
}
|
||||||
|
|
||||||
|
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := c.c.newRequest("PUT", "/v1/catalog/register")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = reg
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{}
|
||||||
|
wm.RequestTime = rtt
|
||||||
|
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
r.obj = dereg
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
wm := &WriteMeta{}
|
||||||
|
wm.RequestTime = rtt
|
||||||
|
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Datacenters is used to query for all the known datacenters
|
||||||
|
func (c *Catalog) Datacenters() ([]string, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
|
||||||
|
_, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out []string
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nodes is used to query all the known nodes
|
||||||
|
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/nodes")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*Node
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Services is used to query for all known services
|
||||||
|
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/services")
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out map[string][]string
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service is used to query catalog entries for a given service
|
||||||
|
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
if tag != "" {
|
||||||
|
r.params.Set("tag", tag)
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out []*CatalogService
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node is used to query for service information about a single node
|
||||||
|
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
|
||||||
|
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
|
||||||
|
r.setQueryOptions(q)
|
||||||
|
rtt, resp, err := requireOK(c.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &QueryMeta{}
|
||||||
|
parseQueryMeta(resp, qm)
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var out *CatalogNode
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return out, qm, nil
|
||||||
|
}
|
279
Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
generated
vendored
Normal file
279
Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
generated
vendored
Normal file
|
@ -0,0 +1,279 @@
|
||||||
|
package api

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/testutil"
)

// TestCatalog_Datacenters verifies that at least one datacenter is
// reported by a freshly started test agent.
func TestCatalog_Datacenters(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	testutil.WaitForResult(func() (bool, error) {
		datacenters, err := catalog.Datacenters()
		if err != nil {
			return false, err
		}

		if len(datacenters) == 0 {
			return false, fmt.Errorf("Bad: %v", datacenters)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestCatalog_Nodes verifies that the local agent eventually shows up
// in the node catalog with a valid query index.
func TestCatalog_Nodes(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	testutil.WaitForResult(func() (bool, error) {
		nodes, meta, err := catalog.Nodes(nil)
		if err != nil {
			return false, err
		}

		if meta.LastIndex == 0 {
			return false, fmt.Errorf("Bad: %v", meta)
		}

		if len(nodes) == 0 {
			return false, fmt.Errorf("Bad: %v", nodes)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestCatalog_Services verifies that the service catalog is non-empty
// (the built-in "consul" service always exists).
func TestCatalog_Services(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	testutil.WaitForResult(func() (bool, error) {
		services, meta, err := catalog.Services(nil)
		if err != nil {
			return false, err
		}

		if meta.LastIndex == 0 {
			return false, fmt.Errorf("Bad: %v", meta)
		}

		if len(services) == 0 {
			return false, fmt.Errorf("Bad: %v", services)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestCatalog_Service queries the per-service endpoint for the
// built-in "consul" service.
func TestCatalog_Service(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	testutil.WaitForResult(func() (bool, error) {
		services, meta, err := catalog.Service("consul", "", nil)
		if err != nil {
			return false, err
		}

		if meta.LastIndex == 0 {
			return false, fmt.Errorf("Bad: %v", meta)
		}

		if len(services) == 0 {
			return false, fmt.Errorf("Bad: %v", services)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestCatalog_Node looks up the local agent's own node entry and
// expects it to carry at least one service.
func TestCatalog_Node(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()
	name, _ := c.Agent().NodeName()

	testutil.WaitForResult(func() (bool, error) {
		info, meta, err := catalog.Node(name, nil)
		if err != nil {
			return false, err
		}

		if meta.LastIndex == 0 {
			return false, fmt.Errorf("Bad: %v", meta)
		}
		if len(info.Services) == 0 {
			return false, fmt.Errorf("Bad: %v", info)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestCatalog_Registration exercises the full register/deregister
// cycle: register an external node with a service and a check, then
// deregister the service, the check, and finally the node itself,
// verifying each removal takes effect.
func TestCatalog_Registration(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	catalog := c.Catalog()

	service := &AgentService{
		ID:      "redis1",
		Service: "redis",
		Tags:    []string{"master", "v1"},
		Port:    8000,
	}

	check := &AgentCheck{
		Node:      "foobar",
		CheckID:   "service:redis1",
		Name:      "Redis health check",
		Notes:     "Script based health check",
		Status:    "passing",
		ServiceID: "redis1",
	}

	reg := &CatalogRegistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		Service:    service,
		Check:      check,
	}

	testutil.WaitForResult(func() (bool, error) {
		if _, err := catalog.Register(reg, nil); err != nil {
			return false, err
		}

		node, _, err := catalog.Node("foobar", nil)
		if err != nil {
			return false, err
		}

		if _, ok := node.Services["redis1"]; !ok {
			return false, fmt.Errorf("missing service: redis1")
		}

		health, _, err := c.Health().Node("foobar", nil)
		if err != nil {
			return false, err
		}

		if health[0].CheckID != "service:redis1" {
			return false, fmt.Errorf("missing checkid service:redis1")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Test catalog deregistration of the previously registered service
	dereg := &CatalogDeregistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		ServiceID:  "redis1",
	}

	if _, err := catalog.Deregister(dereg, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForResult(func() (bool, error) {
		node, _, err := catalog.Node("foobar", nil)
		if err != nil {
			return false, err
		}

		if _, ok := node.Services["redis1"]; ok {
			return false, fmt.Errorf("ServiceID:redis1 is not deregistered")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Test deregistration of the previously registered check
	dereg = &CatalogDeregistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
		CheckID:    "service:redis1",
	}

	if _, err := catalog.Deregister(dereg, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForResult(func() (bool, error) {
		health, _, err := c.Health().Node("foobar", nil)
		if err != nil {
			return false, err
		}

		if len(health) != 0 {
			return false, fmt.Errorf("CheckID:service:redis1 is not deregistered")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})

	// Test node deregistration of the previously registered node
	dereg = &CatalogDeregistration{
		Datacenter: "dc1",
		Node:       "foobar",
		Address:    "192.168.10.10",
	}

	if _, err := catalog.Deregister(dereg, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForResult(func() (bool, error) {
		node, _, err := catalog.Node("foobar", nil)
		if err != nil {
			return false, err
		}

		if node != nil {
			return false, fmt.Errorf("node is not deregistered: %v", node)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}
|
104
Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go
generated
vendored
Normal file
104
Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go
generated
vendored
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
package api

import (
	"bytes"
	"strconv"
)

// Event can be used to query the Event endpoints.
type Event struct {
	c *Client
}

// UserEvent represents an event that was fired by the user.
type UserEvent struct {
	ID            string
	Name          string
	Payload       []byte
	NodeFilter    string
	ServiceFilter string
	TagFilter     string
	Version       int
	LTime         uint64
}

// Event returns a handle to the event endpoints.
func (c *Client) Event() *Event {
	return &Event{c}
}

// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
	r.setWriteOptions(q)
	if params.NodeFilter != "" {
		r.params.Set("node", params.NodeFilter)
	}
	if params.ServiceFilter != "" {
		r.params.Set("service", params.ServiceFilter)
	}
	if params.TagFilter != "" {
		r.params.Set("tag", params.TagFilter)
	}
	if params.Payload != nil {
		r.body = bytes.NewReader(params.Payload)
	}

	rtt, resp, err := requireOK(e.c.doRequest(r))
	if err != nil {
		return "", nil, err
	}
	defer resp.Body.Close()

	wm := &WriteMeta{RequestTime: rtt}
	var out UserEvent
	if err := decodeBody(resp, &out); err != nil {
		return "", nil, err
	}
	return out.ID, wm, nil
}

// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
	r := e.c.newRequest("GET", "/v1/event/list")
	r.setQueryOptions(q)
	if name != "" {
		r.params.Set("name", name)
	}
	rtt, resp, err := requireOK(e.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var entries []*UserEvent
	if err := decodeBody(resp, &entries); err != nil {
		return nil, nil, err
	}
	return entries, qm, nil
}

// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID (a UUID string) into a WaitIndex by XOR-ing the
// low and high 64-bit halves of the UUID. Panics on a malformed UUID.
func (e *Event) IDToIndex(uuid string) uint64 {
	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
	upper := uuid[19:23] + uuid[24:36]
	lowVal, err := strconv.ParseUint(lower, 16, 64)
	if err != nil {
		panic("Failed to convert " + lower)
	}
	highVal, err := strconv.ParseUint(upper, 16, 64)
	if err != nil {
		panic("Failed to convert " + upper)
	}
	return lowVal ^ highVal
}
|
49
Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
generated
vendored
Normal file
49
Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
package api

import (
	"testing"

	"github.com/hashicorp/consul/testutil"
)

// TestEvent_FireList fires a user event, waits for it to appear in the
// event list, and checks the returned ID and derived wait index.
func TestEvent_FireList(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	event := c.Event()

	params := &UserEvent{Name: "foo"}
	id, meta, err := event.Fire(params, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if meta.RequestTime == 0 {
		t.Fatalf("bad: %v", meta)
	}

	if id == "" {
		t.Fatalf("invalid: %v", id)
	}

	var events []*UserEvent
	var qm *QueryMeta
	testutil.WaitForResult(func() (bool, error) {
		events, qm, err = event.List("", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		return len(events) > 0, err
	}, func(err error) {
		t.Fatalf("err: %#v", err)
	})

	// The most recently fired event must be the one we just sent.
	if events[len(events)-1].ID != id {
		t.Fatalf("bad: %#v", events)
	}

	if qm.LastIndex != event.IDToIndex(id) {
		t.Fatalf("Bad: %#v", qm)
	}
}
|
136
Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go
generated
vendored
Normal file
136
Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go
generated
vendored
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
package api

import (
	"fmt"
)

// HealthCheck is used to represent a single check.
type HealthCheck struct {
	Node        string
	CheckID     string
	Name        string
	Status      string
	Notes       string
	Output      string
	ServiceID   string
	ServiceName string
}

// ServiceEntry is used for the health service endpoint: a service
// instance together with its node and the checks covering it.
type ServiceEntry struct {
	Node    *Node
	Service *AgentService
	Checks  []*HealthCheck
}

// Health can be used to query the Health endpoints.
type Health struct {
	c *Client
}

// Health returns a handle to the health endpoints.
func (c *Client) Health() *Health {
	return &Health{c}
}

// Node is used to query for checks belonging to a given node.
func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
	r := h.c.newRequest("GET", "/v1/health/node/"+node)
	r.setQueryOptions(q)
	rtt, resp, err := requireOK(h.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var out []*HealthCheck
	if err := decodeBody(resp, &out); err != nil {
		return nil, nil, err
	}
	return out, qm, nil
}

// Checks is used to return the checks associated with a service.
func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
	r.setQueryOptions(q)
	rtt, resp, err := requireOK(h.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var out []*HealthCheck
	if err := decodeBody(resp, &out); err != nil {
		return nil, nil, err
	}
	return out, qm, nil
}

// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
	r := h.c.newRequest("GET", "/v1/health/service/"+service)
	r.setQueryOptions(q)
	if tag != "" {
		r.params.Set("tag", tag)
	}
	if passingOnly {
		r.params.Set("passing", "1")
	}
	rtt, resp, err := requireOK(h.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var out []*ServiceEntry
	if err := decodeBody(resp, &out); err != nil {
		return nil, nil, err
	}
	return out, qm, nil
}

// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
	// Validate the state up front so a typo fails fast instead of
	// producing an opaque HTTP error.
	switch state {
	case "any":
	case "warning":
	case "critical":
	case "passing":
	case "unknown":
	default:
		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
	}
	r := h.c.newRequest("GET", "/v1/health/state/"+state)
	r.setQueryOptions(q)
	rtt, resp, err := requireOK(h.c.doRequest(r))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	var out []*HealthCheck
	if err := decodeBody(resp, &out); err != nil {
		return nil, nil, err
	}
	return out, qm, nil
}
|
125
Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
generated
vendored
Normal file
125
Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
||||||
|
package api

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/testutil"
)

// TestHealth_Node verifies that the local agent's node eventually
// reports at least one health check.
func TestHealth_Node(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	agent := c.Agent()
	health := c.Health()

	info, err := agent.Self()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	name := info["Config"]["NodeName"].(string)

	testutil.WaitForResult(func() (bool, error) {
		checks, meta, err := health.Node(name, nil)
		if err != nil {
			return false, err
		}
		if meta.LastIndex == 0 {
			return false, fmt.Errorf("bad: %v", meta)
		}
		if len(checks) == 0 {
			return false, fmt.Errorf("bad: %v", checks)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestHealth_Checks registers a TTL-checked service and waits until
// its check shows up via the checks endpoint.
func TestHealth_Checks(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	agent := c.Agent()
	health := c.Health()

	// Make a service with a check
	reg := &AgentServiceRegistration{
		Name: "foo",
		Check: &AgentServiceCheck{
			TTL: "15s",
		},
	}
	if err := agent.ServiceRegister(reg); err != nil {
		t.Fatalf("err: %v", err)
	}
	defer agent.ServiceDeregister("foo")

	testutil.WaitForResult(func() (bool, error) {
		checks, meta, err := health.Checks("foo", nil)
		if err != nil {
			return false, err
		}
		if meta.LastIndex == 0 {
			return false, fmt.Errorf("bad: %v", meta)
		}
		if len(checks) == 0 {
			return false, fmt.Errorf("Bad: %v", checks)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestHealth_Service queries health entries for the built-in "consul"
// service, restricted to passing instances.
func TestHealth_Service(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	health := c.Health()

	testutil.WaitForResult(func() (bool, error) {
		// consul service should always exist...
		checks, meta, err := health.Service("consul", "", true, nil)
		if err != nil {
			return false, err
		}
		if meta.LastIndex == 0 {
			return false, fmt.Errorf("bad: %v", meta)
		}
		if len(checks) == 0 {
			return false, fmt.Errorf("Bad: %v", checks)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}

// TestHealth_State lists checks in "any" state and expects at least one.
func TestHealth_State(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	health := c.Health()

	testutil.WaitForResult(func() (bool, error) {
		checks, meta, err := health.State("any", nil)
		if err != nil {
			return false, err
		}
		if meta.LastIndex == 0 {
			return false, fmt.Errorf("bad: %v", meta)
		}
		if len(checks) == 0 {
			return false, fmt.Errorf("Bad: %v", checks)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %s", err)
	})
}
|
236
Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go
generated
vendored
Normal file
236
Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go
generated
vendored
Normal file
|
@ -0,0 +1,236 @@
|
||||||
|
package api

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
)

// KVPair is used to represent a single K/V entry.
type KVPair struct {
	Key         string
	CreateIndex uint64
	ModifyIndex uint64
	LockIndex   uint64
	Flags       uint64
	Value       []byte
	Session     string
}

// KVPairs is a list of KVPair objects.
type KVPairs []*KVPair

// KV is used to manipulate the K/V API.
type KV struct {
	c *Client
}

// KV is used to return a handle to the K/V apis.
func (c *Client) KV() *KV {
	return &KV{c}
}

// Get is used to lookup a single key. Returns a nil KVPair (and a nil
// error) if the key does not exist.
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
	resp, qm, err := k.getInternal(key, nil, q)
	if err != nil {
		return nil, nil, err
	}
	if resp == nil {
		return nil, qm, nil
	}
	defer resp.Body.Close()

	var entries []*KVPair
	if err := decodeBody(resp, &entries); err != nil {
		return nil, nil, err
	}
	if len(entries) > 0 {
		return entries[0], qm, nil
	}
	return nil, qm, nil
}

// List is used to lookup all keys under a prefix.
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
	if err != nil {
		return nil, nil, err
	}
	if resp == nil {
		return nil, qm, nil
	}
	defer resp.Body.Close()

	var entries []*KVPair
	if err := decodeBody(resp, &entries); err != nil {
		return nil, nil, err
	}
	return entries, qm, nil
}

// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
	params := map[string]string{"keys": ""}
	if separator != "" {
		params["separator"] = separator
	}
	resp, qm, err := k.getInternal(prefix, params, q)
	if err != nil {
		return nil, nil, err
	}
	if resp == nil {
		return nil, qm, nil
	}
	defer resp.Body.Close()

	var entries []string
	if err := decodeBody(resp, &entries); err != nil {
		return nil, nil, err
	}
	return entries, qm, nil
}

// getInternal issues a GET against the KV endpoint for key with the
// given extra query params. A 404 is translated into a nil response
// with a valid QueryMeta; the caller owns closing a non-nil body.
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
	r := k.c.newRequest("GET", "/v1/kv/"+key)
	r.setQueryOptions(q)
	for param, val := range params {
		r.params.Set(param, val)
	}
	rtt, resp, err := k.c.doRequest(r)
	if err != nil {
		return nil, nil, err
	}

	qm := &QueryMeta{}
	parseQueryMeta(resp, qm)
	qm.RequestTime = rtt

	if resp.StatusCode == 404 {
		resp.Body.Close()
		return nil, qm, nil
	} else if resp.StatusCode != 200 {
		resp.Body.Close()
		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
	}
	return resp, qm, nil
}

// Put is used to write a new value. Only the
// Key, Flags and Value is respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
	params := make(map[string]string, 1)
	if p.Flags != 0 {
		params["flags"] = strconv.FormatUint(p.Flags, 10)
	}
	_, wm, err := k.put(p.Key, params, p.Value, q)
	return wm, err
}

// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
	params := make(map[string]string, 2)
	if p.Flags != 0 {
		params["flags"] = strconv.FormatUint(p.Flags, 10)
	}
	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
	return k.put(p.Key, params, p.Value, q)
}

// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
	params := make(map[string]string, 2)
	if p.Flags != 0 {
		params["flags"] = strconv.FormatUint(p.Flags, 10)
	}
	params["acquire"] = p.Session
	return k.put(p.Key, params, p.Value, q)
}

// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
	params := make(map[string]string, 2)
	if p.Flags != 0 {
		params["flags"] = strconv.FormatUint(p.Flags, 10)
	}
	params["release"] = p.Session
	return k.put(p.Key, params, p.Value, q)
}

// put issues a PUT to the KV endpoint and reports whether the server
// answered "true" (used by the CAS/lock operations).
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
	r := k.c.newRequest("PUT", "/v1/kv/"+key)
	r.setWriteOptions(q)
	for param, val := range params {
		r.params.Set(param, val)
	}
	r.body = bytes.NewReader(body)
	rtt, resp, err := requireOK(k.c.doRequest(r))
	if err != nil {
		return false, nil, err
	}
	defer resp.Body.Close()

	wm := &WriteMeta{}
	wm.RequestTime = rtt

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, resp.Body); err != nil {
		return false, nil, fmt.Errorf("Failed to read response: %v", err)
	}
	res := strings.Contains(buf.String(), "true")
	return res, wm, nil
}
|
||||||
|
|
||||||
|
// Delete is used to delete a single key
|
||||||
|
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
|
||||||
|
_, qm, err := k.deleteInternal(key, nil, w)
|
||||||
|
return qm, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteCAS is used for a Delete Check-And-Set operation. The Key
|
||||||
|
// and ModifyIndex are respected. Returns true on success or false on failures.
|
||||||
|
func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
params := map[string]string{
|
||||||
|
"cas": strconv.FormatUint(p.ModifyIndex, 10),
|
||||||
|
}
|
||||||
|
return k.deleteInternal(p.Key, params, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTree is used to delete all keys under a prefix
|
||||||
|
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
|
||||||
|
_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
|
||||||
|
return qm, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||||
|
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
|
||||||
|
r.setWriteOptions(q)
|
||||||
|
for param, val := range params {
|
||||||
|
r.params.Set(param, val)
|
||||||
|
}
|
||||||
|
rtt, resp, err := requireOK(k.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return false, nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
qm := &WriteMeta{}
|
||||||
|
qm.RequestTime = rtt
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if _, err := io.Copy(&buf, resp.Body); err != nil {
|
||||||
|
return false, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||||
|
}
|
||||||
|
res := strings.Contains(string(buf.Bytes()), "true")
|
||||||
|
return res, qm, nil
|
||||||
|
}
|
439
Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
generated
vendored
Normal file
439
Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
generated
vendored
Normal file
|
@ -0,0 +1,439 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestClientPutGetDelete exercises the basic KV round trip against a live
// test server: missing-key Get, Put, Get with value/flag/index checks, Delete.
func TestClientPutGetDelete(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	kv := c.KV()

	// Get on a key that does not exist yet should return a nil pair, no error.
	key := testKey()
	pair, _, err := kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair != nil {
		t.Fatalf("unexpected value: %#v", pair)
	}

	// Put the key
	value := []byte("test")
	p := &KVPair{Key: key, Flags: 42, Value: value}
	if _, err := kv.Put(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Get should now return the stored value, flags, and a non-zero index.
	pair, meta, err := kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair == nil {
		t.Fatalf("expected value: %#v", pair)
	}
	if !bytes.Equal(pair.Value, value) {
		t.Fatalf("unexpected value: %#v", pair)
	}
	if pair.Flags != 42 {
		t.Fatalf("unexpected value: %#v", pair)
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// Delete
	if _, err := kv.Delete(key, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Get should once again report the key as missing.
	pair, _, err = kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair != nil {
		t.Fatalf("unexpected value: %#v", pair)
	}
}
|
||||||
|
|
||||||
|
// TestClient_List_DeleteRecurse covers listing keys under a shared prefix
// and removing the entire subtree with DeleteTree.
func TestClient_List_DeleteRecurse(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	kv := c.KV()

	// Generate some test keys, all nested under one random prefix.
	prefix := testKey()
	var keys []string
	for i := 0; i < 100; i++ {
		keys = append(keys, path.Join(prefix, testKey()))
	}

	// Set values
	value := []byte("test")
	for _, key := range keys {
		p := &KVPair{Key: key, Value: value}
		if _, err := kv.Put(p, nil); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// List the values: every written key should come back with its value.
	pairs, meta, err := kv.List(prefix, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(pairs) != len(keys) {
		t.Fatalf("got %d keys", len(pairs))
	}
	for _, pair := range pairs {
		if !bytes.Equal(pair.Value, value) {
			t.Fatalf("unexpected value: %#v", pair)
		}
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// Delete all keys under the prefix in one call.
	if _, err := kv.DeleteTree(prefix, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// List again: the subtree should now be empty.
	pairs, _, err = kv.List(prefix, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(pairs) != 0 {
		t.Fatalf("got %d keys", len(pairs))
	}
}
|
||||||
|
|
||||||
|
// TestClient_DeleteCAS verifies conditional deletion: DeleteCAS must refuse
// a stale ModifyIndex and succeed with the current one.
func TestClient_DeleteCAS(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	kv := c.KV()

	// Create the key via CAS (ModifyIndex zero-value means "must not exist").
	key := testKey()
	value := []byte("test")
	p := &KVPair{Key: key, Value: value}
	if work, _, err := kv.CAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("CAS failure")
	}

	// Get should work and report the current index for the CAS checks below.
	pair, meta, err := kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair == nil {
		t.Fatalf("expected value: %#v", pair)
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// DeleteCAS with a stale ModifyIndex must be rejected.
	p.ModifyIndex = 1
	if work, _, err := kv.DeleteCAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if work {
		t.Fatalf("unexpected CAS")
	}

	// DeleteCAS with the current ModifyIndex must succeed.
	p.ModifyIndex = meta.LastIndex
	if work, _, err := kv.DeleteCAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("unexpected CAS failure")
	}
}
|
||||||
|
|
||||||
|
// TestClient_CAS verifies Check-And-Set writes: creation via CAS, rejection
// of a stale ModifyIndex, and acceptance of the current one.
func TestClient_CAS(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	kv := c.KV()

	// Create the key via CAS (ModifyIndex zero-value means "must not exist").
	key := testKey()
	value := []byte("test")
	p := &KVPair{Key: key, Value: value}
	if work, _, err := kv.CAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("CAS failure")
	}

	// Get should work and report the current index for the CAS checks below.
	pair, meta, err := kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair == nil {
		t.Fatalf("expected value: %#v", pair)
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// CAS update with a stale index must be rejected.
	newVal := []byte("foo")
	p.Value = newVal
	p.ModifyIndex = 1
	if work, _, err := kv.CAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if work {
		t.Fatalf("unexpected CAS")
	}

	// CAS update with the current index must succeed.
	p.ModifyIndex = meta.LastIndex
	if work, _, err := kv.CAS(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("unexpected CAS failure")
	}
}
|
||||||
|
|
||||||
|
func TestClient_WatchGet(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
kv := c.KV()
|
||||||
|
|
||||||
|
// Get a get without a key
|
||||||
|
key := testKey()
|
||||||
|
pair, meta, err := kv.Get(key, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if pair != nil {
|
||||||
|
t.Fatalf("unexpected value: %#v", pair)
|
||||||
|
}
|
||||||
|
if meta.LastIndex == 0 {
|
||||||
|
t.Fatalf("unexpected value: %#v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put the key
|
||||||
|
value := []byte("test")
|
||||||
|
go func() {
|
||||||
|
kv := c.KV()
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
p := &KVPair{Key: key, Flags: 42, Value: value}
|
||||||
|
if _, err := kv.Put(p, nil); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Get should work
|
||||||
|
options := &QueryOptions{WaitIndex: meta.LastIndex}
|
||||||
|
pair, meta2, err := kv.Get(key, options)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if pair == nil {
|
||||||
|
t.Fatalf("expected value: %#v", pair)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(pair.Value, value) {
|
||||||
|
t.Fatalf("unexpected value: %#v", pair)
|
||||||
|
}
|
||||||
|
if pair.Flags != 42 {
|
||||||
|
t.Fatalf("unexpected value: %#v", pair)
|
||||||
|
}
|
||||||
|
if meta2.LastIndex <= meta.LastIndex {
|
||||||
|
t.Fatalf("unexpected value: %#v", meta2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClient_WatchList(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
kv := c.KV()
|
||||||
|
|
||||||
|
// Get a get without a key
|
||||||
|
prefix := testKey()
|
||||||
|
key := path.Join(prefix, testKey())
|
||||||
|
pairs, meta, err := kv.List(prefix, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if len(pairs) != 0 {
|
||||||
|
t.Fatalf("unexpected value: %#v", pairs)
|
||||||
|
}
|
||||||
|
if meta.LastIndex == 0 {
|
||||||
|
t.Fatalf("unexpected value: %#v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put the key
|
||||||
|
value := []byte("test")
|
||||||
|
go func() {
|
||||||
|
kv := c.KV()
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
p := &KVPair{Key: key, Flags: 42, Value: value}
|
||||||
|
if _, err := kv.Put(p, nil); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Get should work
|
||||||
|
options := &QueryOptions{WaitIndex: meta.LastIndex}
|
||||||
|
pairs, meta2, err := kv.List(prefix, options)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if len(pairs) != 1 {
|
||||||
|
t.Fatalf("expected value: %#v", pairs)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(pairs[0].Value, value) {
|
||||||
|
t.Fatalf("unexpected value: %#v", pairs)
|
||||||
|
}
|
||||||
|
if pairs[0].Flags != 42 {
|
||||||
|
t.Fatalf("unexpected value: %#v", pairs)
|
||||||
|
}
|
||||||
|
if meta2.LastIndex <= meta.LastIndex {
|
||||||
|
t.Fatalf("unexpected value: %#v", meta2)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestClient_Keys_DeleteRecurse covers the keys-only listing endpoint under
// a prefix and removal of the whole subtree with DeleteTree.
func TestClient_Keys_DeleteRecurse(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	kv := c.KV()

	// Generate some test keys, all nested under one random prefix.
	prefix := testKey()
	var keys []string
	for i := 0; i < 100; i++ {
		keys = append(keys, path.Join(prefix, testKey()))
	}

	// Set values
	value := []byte("test")
	for _, key := range keys {
		p := &KVPair{Key: key, Value: value}
		if _, err := kv.Put(p, nil); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Keys should return every written key name (no separator, so flat list).
	out, meta, err := kv.Keys(prefix, "", nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(out) != len(keys) {
		t.Fatalf("got %d keys", len(out))
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// Delete all keys under the prefix in one call.
	if _, err := kv.DeleteTree(prefix, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Keys should now report the subtree as empty.
	out, _, err = kv.Keys(prefix, "", nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(out) != 0 {
		t.Fatalf("got %d keys", len(out))
	}
}
|
||||||
|
|
||||||
|
// TestClient_AcquireRelease verifies session-based locking on a key:
// Acquire attaches the session (and bumps LockIndex), Release detaches it
// while LockIndex stays at its acquired value.
func TestClient_AcquireRelease(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	session := c.Session()
	kv := c.KV()

	// Make a session with no health checks so it cannot be invalidated
	// mid-test.
	id, _, err := session.CreateNoChecks(nil, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer session.Destroy(id, nil)

	// Acquire the key
	key := testKey()
	value := []byte("test")
	p := &KVPair{Key: key, Value: value, Session: id}
	if work, _, err := kv.Acquire(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("Lock failure")
	}

	// Get should show the key locked by our session with LockIndex 1.
	pair, meta, err := kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair == nil {
		t.Fatalf("expected value: %#v", pair)
	}
	if pair.LockIndex != 1 {
		t.Fatalf("Expected lock: %v", pair)
	}
	if pair.Session != id {
		t.Fatalf("Expected lock: %v", pair)
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}

	// Release
	if work, _, err := kv.Release(p, nil); err != nil {
		t.Fatalf("err: %v", err)
	} else if !work {
		t.Fatalf("Release fail")
	}

	// Get should show the session cleared; LockIndex is retained after
	// release.
	pair, meta, err = kv.Get(key, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if pair == nil {
		t.Fatalf("expected value: %#v", pair)
	}
	if pair.LockIndex != 1 {
		t.Fatalf("Expected lock: %v", pair)
	}
	if pair.Session != "" {
		t.Fatalf("Expected unlock: %v", pair)
	}
	if meta.LastIndex == 0 {
		t.Fatalf("unexpected value: %#v", meta)
	}
}
|
326
Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
generated
vendored
Normal file
326
Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
generated
vendored
Normal file
|
@ -0,0 +1,326 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
	"errors"
	"fmt"
	"sync"
	"time"
)
|
||||||
|
|
||||||
|
const (
	// DefaultLockSessionName is the Session Name we assign if none is provided
	DefaultLockSessionName = "Consul API Lock"

	// DefaultLockSessionTTL is the default session TTL if no Session is provided
	// when creating a new Lock. This is used because we do not have any
	// other check to depend upon.
	DefaultLockSessionTTL = "15s"

	// DefaultLockWaitTime is how long we block for at a time to check if lock
	// acquisition is possible. This affects the minimum time it takes to cancel
	// a Lock acquisition.
	DefaultLockWaitTime = 15 * time.Second

	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
	// before attempting to do the lock again. This is so that once a lock-delay
	// is in effect, we do not hot loop retrying the acquisition.
	DefaultLockRetryTime = 5 * time.Second

	// LockFlagValue is a magic flag we set to indicate a key
	// is being used for a lock. It is used to detect a potential
	// conflict with a semaphore.
	LockFlagValue = 0x2ddccbc058a50c18
)
|
||||||
|
|
||||||
|
// Sentinel errors returned by Lock operations. errors.New is used instead of
// fmt.Errorf since the messages contain no format verbs.
var (
	// ErrLockHeld is returned if we attempt to double lock
	ErrLockHeld = errors.New("Lock already held")

	// ErrLockNotHeld is returned if we attempt to unlock a lock
	// that we do not hold.
	ErrLockNotHeld = errors.New("Lock not held")

	// ErrLockInUse is returned if we attempt to destroy a lock
	// that is in use.
	ErrLockInUse = errors.New("Lock in use")

	// ErrLockConflict is returned if the flags on a key
	// used for a lock do not match expectation
	ErrLockConflict = errors.New("Existing key does not match lock use")
)
|
||||||
|
|
||||||
|
// Lock is used to implement client-side leader election. It follows the
// algorithm as described here: https://consul.io/docs/guides/leader-election.html.
type Lock struct {
	c    *Client      // Consul client used for all KV and session calls
	opts *LockOptions // configuration supplied at construction

	isHeld       bool          // true while we believe we hold the lock
	sessionRenew chan struct{} // closed to stop background session renewal (nil if caller supplied the session)
	lockSession  string        // session ID backing the current acquisition
	l            sync.Mutex    // guards all of the fields above
}
|
||||||
|
|
||||||
|
// LockOptions is used to parameterize the Lock behavior.
type LockOptions struct {
	Key         string // Must be set and have write permissions
	Value       []byte // Optional, value to associate with the lock
	Session     string // Optional, created (and managed) if not specified
	SessionName string // Optional, defaults to DefaultLockSessionName
	SessionTTL  string // Optional, defaults to DefaultLockSessionTTL
}
|
||||||
|
|
||||||
|
// LockKey returns a handle to a lock struct which can be used
|
||||||
|
// to acquire and release the mutex. The key used must have
|
||||||
|
// write permissions.
|
||||||
|
func (c *Client) LockKey(key string) (*Lock, error) {
|
||||||
|
opts := &LockOptions{
|
||||||
|
Key: key,
|
||||||
|
}
|
||||||
|
return c.LockOpts(opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockOpts returns a handle to a lock struct which can be used
|
||||||
|
// to acquire and release the mutex. The key used must have
|
||||||
|
// write permissions.
|
||||||
|
func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
|
||||||
|
if opts.Key == "" {
|
||||||
|
return nil, fmt.Errorf("missing key")
|
||||||
|
}
|
||||||
|
if opts.SessionName == "" {
|
||||||
|
opts.SessionName = DefaultLockSessionName
|
||||||
|
}
|
||||||
|
if opts.SessionTTL == "" {
|
||||||
|
opts.SessionTTL = DefaultLockSessionTTL
|
||||||
|
} else {
|
||||||
|
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l := &Lock{
|
||||||
|
c: c,
|
||||||
|
opts: opts,
|
||||||
|
}
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock attempts to acquire the lock and blocks while doing so.
// Providing a non-nil stopCh can be used to abort the lock attempt.
// Returns a channel that is closed if our lock is lost or an error.
// This channel could be closed at any time due to session invalidation,
// communication errors, operator intervention, etc. It is NOT safe to
// assume that the lock is held until Unlock() unless the Session is specifically
// created without any associated health checks. By default Consul sessions
// prefer liveness over safety and an application must be able to handle
// the lock being lost.
//
// A nil, nil return means the attempt was aborted via stopCh.
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
	// Hold the lock as we try to acquire
	l.l.Lock()
	defer l.l.Unlock()

	// Check if we already hold the lock
	if l.isHeld {
		return nil, ErrLockHeld
	}

	// Check if we need to create a session first; if the caller did not
	// supply one, we create it and keep it renewed in the background.
	l.lockSession = l.opts.Session
	if l.lockSession == "" {
		if s, err := l.createSession(); err != nil {
			return nil, fmt.Errorf("failed to create session: %v", err)
		} else {
			l.sessionRenew = make(chan struct{})
			l.lockSession = s
			session := l.c.Session()
			go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)

			// If we fail to acquire the lock, cleanup the session.
			// This defer runs under l.l, so reading isHeld here is safe.
			defer func() {
				if !l.isHeld {
					close(l.sessionRenew)
					l.sessionRenew = nil
				}
			}()
		}
	}

	// Setup the query options for the blocking read below.
	kv := l.c.KV()
	qOpts := &QueryOptions{
		WaitTime: DefaultLockWaitTime,
	}

WAIT:
	// Check if we should quit (non-blocking poll of stopCh).
	select {
	case <-stopCh:
		return nil, nil
	default:
	}

	// Look for an existing lock, blocking until not taken
	pair, meta, err := kv.Get(l.opts.Key, qOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to read lock: %v", err)
	}
	if pair != nil && pair.Flags != LockFlagValue {
		// Key exists but was not created by this lock mechanism.
		return nil, ErrLockConflict
	}
	// `locked` is declared before the gotos so both jump targets see it.
	locked := false
	if pair != nil && pair.Session == l.lockSession {
		// We already own the key (e.g. re-entry with a caller session).
		goto HELD
	}
	if pair != nil && pair.Session != "" {
		// Held by someone else: block on the index until it changes.
		qOpts.WaitIndex = meta.LastIndex
		goto WAIT
	}

	// Try to acquire the lock
	pair = l.lockEntry(l.lockSession)
	locked, _, err = kv.Acquire(pair, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire lock: %v", err)
	}

	// Handle the case of not getting the lock: back off so a lock-delay
	// does not cause a hot retry loop.
	if !locked {
		select {
		case <-time.After(DefaultLockRetryTime):
			goto WAIT
		case <-stopCh:
			return nil, nil
		}
	}

HELD:
	// Watch to ensure we maintain leadership
	leaderCh := make(chan struct{})
	go l.monitorLock(l.lockSession, leaderCh)

	// Set that we own the lock
	l.isHeld = true

	// Locked! All done
	return leaderCh, nil
}
|
||||||
|
|
||||||
|
// Unlock releases the lock. It is an error to call this
// if the lock is not currently held.
func (l *Lock) Unlock() error {
	// Hold the lock as we try to release
	l.l.Lock()
	defer l.l.Unlock()

	// Ensure the lock is actually held
	if !l.isHeld {
		return ErrLockNotHeld
	}

	// Set that we no longer own the lock
	l.isHeld = false

	// Stop the session renew. Deferred so renewal keeps running until
	// after the Release call below has gone out.
	if l.sessionRenew != nil {
		defer func() {
			close(l.sessionRenew)
			l.sessionRenew = nil
		}()
	}

	// Get the lock entry, and clear the lock session
	lockEnt := l.lockEntry(l.lockSession)
	l.lockSession = ""

	// Release the lock explicitly
	kv := l.c.KV()
	_, _, err := kv.Release(lockEnt, nil)
	if err != nil {
		return fmt.Errorf("failed to release lock: %v", err)
	}
	return nil
}
|
||||||
|
|
||||||
|
// Destroy is used to cleanup the lock entry. It is not necessary
// to invoke. It will fail if the lock is in use.
func (l *Lock) Destroy() error {
	// Hold the lock as we try to release
	l.l.Lock()
	defer l.l.Unlock()

	// Check if we already hold the lock; a held lock must be Unlock()ed first.
	if l.isHeld {
		return ErrLockHeld
	}

	// Look for an existing lock
	kv := l.c.KV()
	pair, _, err := kv.Get(l.opts.Key, nil)
	if err != nil {
		return fmt.Errorf("failed to read lock: %v", err)
	}

	// Nothing to do if the lock does not exist
	if pair == nil {
		return nil
	}

	// Check for possible flag conflict: the key was not created by this
	// lock mechanism.
	if pair.Flags != LockFlagValue {
		return ErrLockConflict
	}

	// Check if it is in use (some session currently holds it).
	if pair.Session != "" {
		return ErrLockInUse
	}

	// Attempt the delete. DeleteCAS (not Delete) guards against a racing
	// acquirer modifying the key between our Get and the delete.
	didRemove, _, err := kv.DeleteCAS(pair, nil)
	if err != nil {
		return fmt.Errorf("failed to remove lock: %v", err)
	}
	if !didRemove {
		return ErrLockInUse
	}
	return nil
}
|
||||||
|
|
||||||
|
// createSession is used to create a new managed session
|
||||||
|
func (l *Lock) createSession() (string, error) {
|
||||||
|
session := l.c.Session()
|
||||||
|
se := &SessionEntry{
|
||||||
|
Name: l.opts.SessionName,
|
||||||
|
TTL: l.opts.SessionTTL,
|
||||||
|
}
|
||||||
|
id, _, err := session.Create(se, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// lockEntry returns a formatted KVPair for the lock
|
||||||
|
func (l *Lock) lockEntry(session string) *KVPair {
|
||||||
|
return &KVPair{
|
||||||
|
Key: l.opts.Key,
|
||||||
|
Value: l.opts.Value,
|
||||||
|
Session: session,
|
||||||
|
Flags: LockFlagValue,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// monitorLock is a long running routine to monitor a lock ownership.
// It closes the stopCh if we lose our leadership. Uses consistent-mode
// blocking reads on the lock key and loops while the session still owns it.
func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
	defer close(stopCh)
	kv := l.c.KV()
	opts := &QueryOptions{RequireConsistent: true}
WAIT:
	pair, meta, err := kv.Get(l.opts.Key, opts)
	if err != nil {
		// NOTE(review): any transient Get error (e.g. a network blip) falls
		// through to the deferred close and signals lost leadership, even
		// though the session may still hold the lock — confirm this
		// fail-toward-safety bias is intended.
		return
	}
	if pair != nil && pair.Session == session {
		// Still held by our session: block on the index until it changes.
		opts.WaitIndex = meta.LastIndex
		goto WAIT
	}
	// Key gone or held by another session: fall through and close stopCh.
}
|
363
Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
generated
vendored
Normal file
363
Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
generated
vendored
Normal file
|
@ -0,0 +1,363 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestLock_LockUnlock exercises the basic Lock lifecycle: unlock-before-lock
// fails, lock succeeds, double lock fails, unlock succeeds, double unlock
// fails, and the leader channel closes after release.
func TestLock_LockUnlock(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	lock, err := c.LockKey("test/lock")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Initial unlock should fail
	err = lock.Unlock()
	if err != ErrLockNotHeld {
		t.Fatalf("err: %v", err)
	}

	// Should work
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if leaderCh == nil {
		t.Fatalf("not leader")
	}

	// Double lock should fail
	_, err = lock.Lock(nil)
	if err != ErrLockHeld {
		t.Fatalf("err: %v", err)
	}

	// Should be leader: the channel must still be open.
	select {
	case <-leaderCh:
		t.Fatalf("should be leader")
	default:
	}

	// Initial unlock should work
	err = lock.Unlock()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Double unlock should fail
	err = lock.Unlock()
	if err != ErrLockNotHeld {
		t.Fatalf("err: %v", err)
	}

	// Should lose leadership: the channel must close shortly.
	select {
	case <-leaderCh:
	case <-time.After(time.Second):
		t.Fatalf("should not be leader")
	}
}
|
||||||
|
|
||||||
|
// TestLock_ForceInvalidate verifies that destroying the backing session out
// from under a held lock closes the leader channel.
func TestLock_ForceInvalidate(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	lock, err := c.LockKey("test/lock")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should work
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if leaderCh == nil {
		t.Fatalf("not leader")
	}
	defer lock.Unlock()

	go func() {
		// Nuke the session, simulating an operator invalidation
		// or a health check failure
		session := c.Session()
		session.Destroy(lock.lockSession, nil)
	}()

	// Should lose leadership: the channel must close shortly.
	select {
	case <-leaderCh:
	case <-time.After(time.Second):
		t.Fatalf("should not be leader")
	}
}
|
||||||
|
|
||||||
|
// TestLock_DeleteKey verifies that deleting the lock key out from under a
// held lock closes the leader channel.
func TestLock_DeleteKey(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	lock, err := c.LockKey("test/lock")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should work
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if leaderCh == nil {
		t.Fatalf("not leader")
	}
	defer lock.Unlock()

	go func() {
		// Nuke the key, simulate an operator intervention
		kv := c.KV()
		kv.Delete("test/lock", nil)
	}()

	// Should lose leadership: the channel must close shortly.
	select {
	case <-leaderCh:
	case <-time.After(time.Second):
		t.Fatalf("should not be leader")
	}
}
|
||||||
|
|
||||||
|
func TestLock_Contend(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
acquired := make([]bool, 3)
|
||||||
|
for idx := range acquired {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
lock, err := c.LockKey("test/lock")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work eventually, will contend
|
||||||
|
leaderCh, err := lock.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leaderCh == nil {
|
||||||
|
t.Fatalf("not leader")
|
||||||
|
}
|
||||||
|
defer lock.Unlock()
|
||||||
|
log.Printf("Contender %d acquired", idx)
|
||||||
|
|
||||||
|
// Set acquired and then leave
|
||||||
|
acquired[idx] = true
|
||||||
|
}(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for termination
|
||||||
|
doneCh := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(doneCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for everybody to get a turn
|
||||||
|
select {
|
||||||
|
case <-doneCh:
|
||||||
|
case <-time.After(3 * DefaultLockRetryTime):
|
||||||
|
t.Fatalf("timeout")
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, did := range acquired {
|
||||||
|
if !did {
|
||||||
|
t.Fatalf("contender %d never acquired", idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLock_Destroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
lock, err := c.LockKey("test/lock")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
leaderCh, err := lock.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leaderCh == nil {
|
||||||
|
t.Fatalf("not leader")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should fail
|
||||||
|
if err := lock.Destroy(); err != ErrLockHeld {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be able to release
|
||||||
|
err = lock.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire with a different lock
|
||||||
|
l2, err := c.LockKey("test/lock")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
leaderCh, err = l2.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leaderCh == nil {
|
||||||
|
t.Fatalf("not leader")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should still fail
|
||||||
|
if err := lock.Destroy(); err != ErrLockInUse {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should relese
|
||||||
|
err = l2.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should work
|
||||||
|
err = lock.Destroy()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Double destroy should work
|
||||||
|
err = l2.Destroy()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLock_Conflict(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/lock/", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
lockCh, err := sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if lockCh == nil {
|
||||||
|
t.Fatalf("not hold")
|
||||||
|
}
|
||||||
|
defer sema.Release()
|
||||||
|
|
||||||
|
lock, err := c.LockKey("test/lock/.lock")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should conflict with semaphore
|
||||||
|
_, err = lock.Lock(nil)
|
||||||
|
if err != ErrLockConflict {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should conflict with semaphore
|
||||||
|
err = lock.Destroy()
|
||||||
|
if err != ErrLockConflict {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLock_ReclaimLock(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session, _, err := c.Session().Create(&SessionEntry{}, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
leaderCh, err := lock.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leaderCh == nil {
|
||||||
|
t.Fatalf("not leader")
|
||||||
|
}
|
||||||
|
defer lock.Unlock()
|
||||||
|
|
||||||
|
l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
reclaimed := make(chan (<-chan struct{}), 1)
|
||||||
|
go func() {
|
||||||
|
l2Ch, err := l2.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("not locked: %v", err)
|
||||||
|
}
|
||||||
|
reclaimed <- l2Ch
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Should reclaim the lock
|
||||||
|
var leader2Ch <-chan struct{}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case leader2Ch = <-reclaimed:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should have locked")
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlock should work
|
||||||
|
err = l2.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
//Both locks should see the unlock
|
||||||
|
select {
|
||||||
|
case <-leader2Ch:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should not be leader")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-leaderCh:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should not be leader")
|
||||||
|
}
|
||||||
|
}
|
24
Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go
generated
vendored
Normal file
24
Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// Raw can be used to do raw queries against custom endpoints
|
||||||
|
type Raw struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Raw returns a handle to query endpoints
|
||||||
|
func (c *Client) Raw() *Raw {
|
||||||
|
return &Raw{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query is used to do a GET request against an endpoint
|
||||||
|
// and deserialize the response into an interface using
|
||||||
|
// standard Consul conventions.
|
||||||
|
func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
|
||||||
|
return raw.c.query(endpoint, out, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write is used to do a PUT request against an endpoint
|
||||||
|
// and serialize/deserialized using the standard Consul conventions.
|
||||||
|
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
return raw.c.write(endpoint, in, out, q)
|
||||||
|
}
|
482
Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
Normal file
482
Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
Normal file
|
@ -0,0 +1,482 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
|
||||||
|
DefaultSemaphoreSessionName = "Consul API Semaphore"
|
||||||
|
|
||||||
|
// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
|
||||||
|
// when creating a new Semaphore. This is used because we do not have another
|
||||||
|
// other check to depend upon.
|
||||||
|
DefaultSemaphoreSessionTTL = "15s"
|
||||||
|
|
||||||
|
// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
|
||||||
|
// acquisition is possible. This affects the minimum time it takes to cancel
|
||||||
|
// a Semaphore acquisition.
|
||||||
|
DefaultSemaphoreWaitTime = 15 * time.Second
|
||||||
|
|
||||||
|
// DefaultSemaphoreRetryTime is how long we wait after a failed lock acquisition
|
||||||
|
// before attempting to do the lock again. This is so that once a lock-delay
|
||||||
|
// is in affect, we do not hot loop retrying the acquisition.
|
||||||
|
DefaultSemaphoreRetryTime = 5 * time.Second
|
||||||
|
|
||||||
|
// DefaultSemaphoreKey is the key used within the prefix to
|
||||||
|
// use for coordination between all the contenders.
|
||||||
|
DefaultSemaphoreKey = ".lock"
|
||||||
|
|
||||||
|
// SemaphoreFlagValue is a magic flag we set to indicate a key
|
||||||
|
// is being used for a semaphore. It is used to detect a potential
|
||||||
|
// conflict with a lock.
|
||||||
|
SemaphoreFlagValue = 0xe0f69a2baa414de0
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrSemaphoreHeld is returned if we attempt to double lock
|
||||||
|
ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
|
||||||
|
|
||||||
|
// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
|
||||||
|
// that we do not hold.
|
||||||
|
ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
|
||||||
|
|
||||||
|
// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
|
||||||
|
// that is in use.
|
||||||
|
ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
|
||||||
|
|
||||||
|
// ErrSemaphoreConflict is returned if the flags on a key
|
||||||
|
// used for a semaphore do not match expectation
|
||||||
|
ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Semaphore is used to implement a distributed semaphore
|
||||||
|
// using the Consul KV primitives.
|
||||||
|
type Semaphore struct {
|
||||||
|
c *Client
|
||||||
|
opts *SemaphoreOptions
|
||||||
|
|
||||||
|
isHeld bool
|
||||||
|
sessionRenew chan struct{}
|
||||||
|
lockSession string
|
||||||
|
l sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// SemaphoreOptions is used to parameterize the Semaphore
|
||||||
|
type SemaphoreOptions struct {
|
||||||
|
Prefix string // Must be set and have write permissions
|
||||||
|
Limit int // Must be set, and be positive
|
||||||
|
Value []byte // Optional, value to associate with the contender entry
|
||||||
|
Session string // OPtional, created if not specified
|
||||||
|
SessionName string // Optional, defaults to DefaultLockSessionName
|
||||||
|
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
||||||
|
}
|
||||||
|
|
||||||
|
// semaphoreLock is written under the DefaultSemaphoreKey and
|
||||||
|
// is used to coordinate between all the contenders.
|
||||||
|
type semaphoreLock struct {
|
||||||
|
// Limit is the integer limit of holders. This is used to
|
||||||
|
// verify that all the holders agree on the value.
|
||||||
|
Limit int
|
||||||
|
|
||||||
|
// Holders is a list of all the semaphore holders.
|
||||||
|
// It maps the session ID to true. It is used as a set effectively.
|
||||||
|
Holders map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// SemaphorePrefix is used to created a Semaphore which will operate
|
||||||
|
// at the given KV prefix and uses the given limit for the semaphore.
|
||||||
|
// The prefix must have write privileges, and the limit must be agreed
|
||||||
|
// upon by all contenders.
|
||||||
|
func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
|
||||||
|
opts := &SemaphoreOptions{
|
||||||
|
Prefix: prefix,
|
||||||
|
Limit: limit,
|
||||||
|
}
|
||||||
|
return c.SemaphoreOpts(opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SemaphoreOpts is used to create a Semaphore with the given options.
|
||||||
|
// The prefix must have write privileges, and the limit must be agreed
|
||||||
|
// upon by all contenders. If a Session is not provided, one will be created.
|
||||||
|
func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
|
||||||
|
if opts.Prefix == "" {
|
||||||
|
return nil, fmt.Errorf("missing prefix")
|
||||||
|
}
|
||||||
|
if opts.Limit <= 0 {
|
||||||
|
return nil, fmt.Errorf("semaphore limit must be positive")
|
||||||
|
}
|
||||||
|
if opts.SessionName == "" {
|
||||||
|
opts.SessionName = DefaultSemaphoreSessionName
|
||||||
|
}
|
||||||
|
if opts.SessionTTL == "" {
|
||||||
|
opts.SessionTTL = DefaultSemaphoreSessionTTL
|
||||||
|
} else {
|
||||||
|
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s := &Semaphore{
|
||||||
|
c: c,
|
||||||
|
opts: opts,
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Acquire attempts to reserve a slot in the semaphore, blocking until
|
||||||
|
// success, interrupted via the stopCh or an error is encounted.
|
||||||
|
// Providing a non-nil stopCh can be used to abort the attempt.
|
||||||
|
// On success, a channel is returned that represents our slot.
|
||||||
|
// This channel could be closed at any time due to session invalidation,
|
||||||
|
// communication errors, operator intervention, etc. It is NOT safe to
|
||||||
|
// assume that the slot is held until Release() unless the Session is specifically
|
||||||
|
// created without any associated health checks. By default Consul sessions
|
||||||
|
// prefer liveness over safety and an application must be able to handle
|
||||||
|
// the session being lost.
|
||||||
|
func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||||
|
// Hold the lock as we try to acquire
|
||||||
|
s.l.Lock()
|
||||||
|
defer s.l.Unlock()
|
||||||
|
|
||||||
|
// Check if we already hold the semaphore
|
||||||
|
if s.isHeld {
|
||||||
|
return nil, ErrSemaphoreHeld
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we need to create a session first
|
||||||
|
s.lockSession = s.opts.Session
|
||||||
|
if s.lockSession == "" {
|
||||||
|
if sess, err := s.createSession(); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create session: %v", err)
|
||||||
|
} else {
|
||||||
|
s.sessionRenew = make(chan struct{})
|
||||||
|
s.lockSession = sess
|
||||||
|
session := s.c.Session()
|
||||||
|
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
|
||||||
|
|
||||||
|
// If we fail to acquire the lock, cleanup the session
|
||||||
|
defer func() {
|
||||||
|
if !s.isHeld {
|
||||||
|
close(s.sessionRenew)
|
||||||
|
s.sessionRenew = nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the contender entry
|
||||||
|
kv := s.c.KV()
|
||||||
|
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
|
||||||
|
if err != nil || !made {
|
||||||
|
return nil, fmt.Errorf("failed to make contender entry: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup the query options
|
||||||
|
qOpts := &QueryOptions{
|
||||||
|
WaitTime: DefaultSemaphoreWaitTime,
|
||||||
|
}
|
||||||
|
|
||||||
|
WAIT:
|
||||||
|
// Check if we should quit
|
||||||
|
select {
|
||||||
|
case <-stopCh:
|
||||||
|
return nil, nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the prefix
|
||||||
|
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read prefix: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the lock
|
||||||
|
lockPair := s.findLock(pairs)
|
||||||
|
if lockPair.Flags != SemaphoreFlagValue {
|
||||||
|
return nil, ErrSemaphoreConflict
|
||||||
|
}
|
||||||
|
lock, err := s.decodeLock(lockPair)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify we agree with the limit
|
||||||
|
if lock.Limit != s.opts.Limit {
|
||||||
|
return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
|
||||||
|
lock.Limit, s.opts.Limit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prune the dead holders
|
||||||
|
s.pruneDeadHolders(lock, pairs)
|
||||||
|
|
||||||
|
// Check if the lock is held
|
||||||
|
if len(lock.Holders) >= lock.Limit {
|
||||||
|
qOpts.WaitIndex = meta.LastIndex
|
||||||
|
goto WAIT
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new lock with us as a holder
|
||||||
|
lock.Holders[s.lockSession] = true
|
||||||
|
newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt the acquisition
|
||||||
|
didSet, _, err := kv.CAS(newLock, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to update lock: %v", err)
|
||||||
|
}
|
||||||
|
if !didSet {
|
||||||
|
// Update failed, could have been a race with another contender,
|
||||||
|
// retry the operation
|
||||||
|
goto WAIT
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch to ensure we maintain ownership of the slot
|
||||||
|
lockCh := make(chan struct{})
|
||||||
|
go s.monitorLock(s.lockSession, lockCh)
|
||||||
|
|
||||||
|
// Set that we own the lock
|
||||||
|
s.isHeld = true
|
||||||
|
|
||||||
|
// Acquired! All done
|
||||||
|
return lockCh, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release is used to voluntarily give up our semaphore slot. It is
|
||||||
|
// an error to call this if the semaphore has not been acquired.
|
||||||
|
func (s *Semaphore) Release() error {
|
||||||
|
// Hold the lock as we try to release
|
||||||
|
s.l.Lock()
|
||||||
|
defer s.l.Unlock()
|
||||||
|
|
||||||
|
// Ensure the lock is actually held
|
||||||
|
if !s.isHeld {
|
||||||
|
return ErrSemaphoreNotHeld
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set that we no longer own the lock
|
||||||
|
s.isHeld = false
|
||||||
|
|
||||||
|
// Stop the session renew
|
||||||
|
if s.sessionRenew != nil {
|
||||||
|
defer func() {
|
||||||
|
close(s.sessionRenew)
|
||||||
|
s.sessionRenew = nil
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get and clear the lock session
|
||||||
|
lockSession := s.lockSession
|
||||||
|
s.lockSession = ""
|
||||||
|
|
||||||
|
// Remove ourselves as a lock holder
|
||||||
|
kv := s.c.KV()
|
||||||
|
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
|
||||||
|
READ:
|
||||||
|
pair, _, err := kv.Get(key, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pair == nil {
|
||||||
|
pair = &KVPair{}
|
||||||
|
}
|
||||||
|
lock, err := s.decodeLock(pair)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new lock without us as a holder
|
||||||
|
if _, ok := lock.Holders[lockSession]; ok {
|
||||||
|
delete(lock.Holders, lockSession)
|
||||||
|
newLock, err := s.encodeLock(lock, pair.ModifyIndex)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap the locks
|
||||||
|
didSet, _, err := kv.CAS(newLock, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to update lock: %v", err)
|
||||||
|
}
|
||||||
|
if !didSet {
|
||||||
|
goto READ
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy the contender entry
|
||||||
|
contenderKey := path.Join(s.opts.Prefix, lockSession)
|
||||||
|
if _, err := kv.Delete(contenderKey, nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy is used to cleanup the semaphore entry. It is not necessary
|
||||||
|
// to invoke. It will fail if the semaphore is in use.
|
||||||
|
func (s *Semaphore) Destroy() error {
|
||||||
|
// Hold the lock as we try to acquire
|
||||||
|
s.l.Lock()
|
||||||
|
defer s.l.Unlock()
|
||||||
|
|
||||||
|
// Check if we already hold the semaphore
|
||||||
|
if s.isHeld {
|
||||||
|
return ErrSemaphoreHeld
|
||||||
|
}
|
||||||
|
|
||||||
|
// List for the semaphore
|
||||||
|
kv := s.c.KV()
|
||||||
|
pairs, _, err := kv.List(s.opts.Prefix, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read prefix: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the lock pair, bail if it doesn't exist
|
||||||
|
lockPair := s.findLock(pairs)
|
||||||
|
if lockPair.ModifyIndex == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if lockPair.Flags != SemaphoreFlagValue {
|
||||||
|
return ErrSemaphoreConflict
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the lock
|
||||||
|
lock, err := s.decodeLock(lockPair)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prune the dead holders
|
||||||
|
s.pruneDeadHolders(lock, pairs)
|
||||||
|
|
||||||
|
// Check if there are any holders
|
||||||
|
if len(lock.Holders) > 0 {
|
||||||
|
return ErrSemaphoreInUse
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt the delete
|
||||||
|
didRemove, _, err := kv.DeleteCAS(lockPair, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to remove semaphore: %v", err)
|
||||||
|
}
|
||||||
|
if !didRemove {
|
||||||
|
return ErrSemaphoreInUse
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createSession is used to create a new managed session
|
||||||
|
func (s *Semaphore) createSession() (string, error) {
|
||||||
|
session := s.c.Session()
|
||||||
|
se := &SessionEntry{
|
||||||
|
Name: s.opts.SessionName,
|
||||||
|
TTL: s.opts.SessionTTL,
|
||||||
|
Behavior: SessionBehaviorDelete,
|
||||||
|
}
|
||||||
|
id, _, err := session.Create(se, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// contenderEntry returns a formatted KVPair for the contender
|
||||||
|
func (s *Semaphore) contenderEntry(session string) *KVPair {
|
||||||
|
return &KVPair{
|
||||||
|
Key: path.Join(s.opts.Prefix, session),
|
||||||
|
Value: s.opts.Value,
|
||||||
|
Session: session,
|
||||||
|
Flags: SemaphoreFlagValue,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findLock is used to find the KV Pair which is used for coordination
|
||||||
|
func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
|
||||||
|
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
|
||||||
|
for _, pair := range pairs {
|
||||||
|
if pair.Key == key {
|
||||||
|
return pair
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &KVPair{Flags: SemaphoreFlagValue}
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeLock is used to decode a semaphoreLock from an
|
||||||
|
// entry in Consul
|
||||||
|
func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
|
||||||
|
// Handle if there is no lock
|
||||||
|
if pair == nil || pair.Value == nil {
|
||||||
|
return &semaphoreLock{
|
||||||
|
Limit: s.opts.Limit,
|
||||||
|
Holders: make(map[string]bool),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
l := &semaphoreLock{}
|
||||||
|
if err := json.Unmarshal(pair.Value, l); err != nil {
|
||||||
|
return nil, fmt.Errorf("lock decoding failed: %v", err)
|
||||||
|
}
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeLock is used to encode a semaphoreLock into a KVPair
|
||||||
|
// that can be PUT
|
||||||
|
func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
|
||||||
|
enc, err := json.Marshal(l)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("lock encoding failed: %v", err)
|
||||||
|
}
|
||||||
|
pair := &KVPair{
|
||||||
|
Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
|
||||||
|
Value: enc,
|
||||||
|
Flags: SemaphoreFlagValue,
|
||||||
|
ModifyIndex: oldIndex,
|
||||||
|
}
|
||||||
|
return pair, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// pruneDeadHolders is used to remove all the dead lock holders
|
||||||
|
func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
|
||||||
|
// Gather all the live holders
|
||||||
|
alive := make(map[string]struct{}, len(pairs))
|
||||||
|
for _, pair := range pairs {
|
||||||
|
if pair.Session != "" {
|
||||||
|
alive[pair.Session] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove any holders that are dead
|
||||||
|
for holder := range lock.Holders {
|
||||||
|
if _, ok := alive[holder]; !ok {
|
||||||
|
delete(lock.Holders, holder)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// monitorLock is a long running routine to monitor a semaphore ownership
|
||||||
|
// It closes the stopCh if we lose our slot.
|
||||||
|
func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
|
||||||
|
defer close(stopCh)
|
||||||
|
kv := s.c.KV()
|
||||||
|
opts := &QueryOptions{RequireConsistent: true}
|
||||||
|
WAIT:
|
||||||
|
pairs, meta, err := kv.List(s.opts.Prefix, opts)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
lockPair := s.findLock(pairs)
|
||||||
|
lock, err := s.decodeLock(lockPair)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.pruneDeadHolders(lock, pairs)
|
||||||
|
if _, ok := lock.Holders[session]; ok {
|
||||||
|
opts.WaitIndex = meta.LastIndex
|
||||||
|
goto WAIT
|
||||||
|
}
|
||||||
|
}
|
313
Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
generated
vendored
Normal file
313
Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
generated
vendored
Normal file
|
@ -0,0 +1,313 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSemaphore_AcquireRelease(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initial release should fail
|
||||||
|
err = sema.Release()
|
||||||
|
if err != ErrSemaphoreNotHeld {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
lockCh, err := sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if lockCh == nil {
|
||||||
|
t.Fatalf("not hold")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Double lock should fail
|
||||||
|
_, err = sema.Acquire(nil)
|
||||||
|
if err != ErrSemaphoreHeld {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should be held
|
||||||
|
select {
|
||||||
|
case <-lockCh:
|
||||||
|
t.Fatalf("should be held")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initial release should work
|
||||||
|
err = sema.Release()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Double unlock should fail
|
||||||
|
err = sema.Release()
|
||||||
|
if err != ErrSemaphoreNotHeld {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should lose resource
|
||||||
|
select {
|
||||||
|
case <-lockCh:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should not be held")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_ForceInvalidate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
lockCh, err := sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if lockCh == nil {
|
||||||
|
t.Fatalf("not acquired")
|
||||||
|
}
|
||||||
|
defer sema.Release()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// Nuke the session, simulator an operator invalidation
|
||||||
|
// or a health check failure
|
||||||
|
session := c.Session()
|
||||||
|
session.Destroy(sema.lockSession, nil)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Should loose slot
|
||||||
|
select {
|
||||||
|
case <-lockCh:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should not be locked")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_DeleteKey(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
lockCh, err := sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if lockCh == nil {
|
||||||
|
t.Fatalf("not locked")
|
||||||
|
}
|
||||||
|
defer sema.Release()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// Nuke the key, simulate an operator intervention
|
||||||
|
kv := c.KV()
|
||||||
|
kv.DeleteTree("test/semaphore", nil)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Should loose leadership
|
||||||
|
select {
|
||||||
|
case <-lockCh:
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatalf("should not be locked")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_Contend(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
acquired := make([]bool, 4)
|
||||||
|
for idx := range acquired {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work eventually, will contend
|
||||||
|
lockCh, err := sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if lockCh == nil {
|
||||||
|
t.Fatalf("not locked")
|
||||||
|
}
|
||||||
|
defer sema.Release()
|
||||||
|
log.Printf("Contender %d acquired", idx)
|
||||||
|
|
||||||
|
// Set acquired and then leave
|
||||||
|
acquired[idx] = true
|
||||||
|
}(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for termination
|
||||||
|
doneCh := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
close(doneCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for everybody to get a turn
|
||||||
|
select {
|
||||||
|
case <-doneCh:
|
||||||
|
case <-time.After(3 * DefaultLockRetryTime):
|
||||||
|
t.Fatalf("timeout")
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, did := range acquired {
|
||||||
|
if !did {
|
||||||
|
t.Fatalf("contender %d never acquired", idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_BadLimit(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 0)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("should error")
|
||||||
|
}
|
||||||
|
|
||||||
|
sema, err = c.SemaphorePrefix("test/semaphore", 1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sema2, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = sema2.Acquire(nil)
|
||||||
|
if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_Destroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sema2, err := c.SemaphorePrefix("test/semaphore", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = sema.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = sema2.Acquire(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should fail, still held
|
||||||
|
if err := sema.Destroy(); err != ErrSemaphoreHeld {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sema.Release()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should fail, still in use
|
||||||
|
if err := sema.Destroy(); err != ErrSemaphoreInUse {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sema2.Release()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should work
|
||||||
|
if err := sema.Destroy(); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy should work
|
||||||
|
if err := sema2.Destroy(); err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSemaphore_Conflict(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
lock, err := c.LockKey("test/sema/.lock")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should work
|
||||||
|
leaderCh, err := lock.Lock(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leaderCh == nil {
|
||||||
|
t.Fatalf("not leader")
|
||||||
|
}
|
||||||
|
defer lock.Unlock()
|
||||||
|
|
||||||
|
sema, err := c.SemaphorePrefix("test/sema/", 2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should conflict with lock
|
||||||
|
_, err = sema.Acquire(nil)
|
||||||
|
if err != ErrSemaphoreConflict {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should conflict with lock
|
||||||
|
err = sema.Destroy()
|
||||||
|
if err != ErrSemaphoreConflict {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
}
|
201
Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go
generated
vendored
Normal file
201
Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Session invalidation behaviors. These select what happens to the locks
// associated with a session when the session is invalidated.
const (
	// SessionBehaviorRelease is the default behavior and causes
	// all associated locks to be released on session invalidation.
	SessionBehaviorRelease = "release"

	// SessionBehaviorDelete is new in Consul 0.5 and changes the
	// behavior to delete all associated locks on session invalidation.
	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
	SessionBehaviorDelete = "delete"
)
|
||||||
|
|
||||||
|
// SessionEntry represents a session in consul.
type SessionEntry struct {
	CreateIndex uint64        // index at which the session was created
	ID          string        // session identifier assigned by the server
	Name        string        // optional human-readable name
	Node        string        // node the session is associated with
	Checks      []string      // health checks associated with the session
	LockDelay   time.Duration // lock delay applied on invalidation
	Behavior    string        // SessionBehaviorRelease or SessionBehaviorDelete
	TTL         string        // time-to-live as a duration string, e.g. "10s"
}
|
||||||
|
|
||||||
|
// Session can be used to query the Session endpoints
|
||||||
|
type Session struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Session returns a handle to the session endpoints
|
||||||
|
func (c *Client) Session() *Session {
|
||||||
|
return &Session{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateNoChecks is like Create but is used specifically to create
|
||||||
|
// a session with no associated health checks.
|
||||||
|
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
body["Checks"] = []string{}
|
||||||
|
if se != nil {
|
||||||
|
if se.Name != "" {
|
||||||
|
body["Name"] = se.Name
|
||||||
|
}
|
||||||
|
if se.Node != "" {
|
||||||
|
body["Node"] = se.Node
|
||||||
|
}
|
||||||
|
if se.LockDelay != 0 {
|
||||||
|
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||||
|
}
|
||||||
|
if se.Behavior != "" {
|
||||||
|
body["Behavior"] = se.Behavior
|
||||||
|
}
|
||||||
|
if se.TTL != "" {
|
||||||
|
body["TTL"] = se.TTL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.create(body, q)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create makes a new session. Providing a session entry can
|
||||||
|
// customize the session. It can also be nil to use defaults.
|
||||||
|
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
var obj interface{}
|
||||||
|
if se != nil {
|
||||||
|
body := make(map[string]interface{})
|
||||||
|
obj = body
|
||||||
|
if se.Name != "" {
|
||||||
|
body["Name"] = se.Name
|
||||||
|
}
|
||||||
|
if se.Node != "" {
|
||||||
|
body["Node"] = se.Node
|
||||||
|
}
|
||||||
|
if se.LockDelay != 0 {
|
||||||
|
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||||
|
}
|
||||||
|
if len(se.Checks) > 0 {
|
||||||
|
body["Checks"] = se.Checks
|
||||||
|
}
|
||||||
|
if se.Behavior != "" {
|
||||||
|
body["Behavior"] = se.Behavior
|
||||||
|
}
|
||||||
|
if se.TTL != "" {
|
||||||
|
body["TTL"] = se.TTL
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.create(obj, q)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
|
||||||
|
var out struct{ ID string }
|
||||||
|
wm, err := s.c.write("/v1/session/create", obj, &out, q)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return out.ID, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy invalides a given session
|
||||||
|
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||||
|
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Renew renews the TTL on a given session
|
||||||
|
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
|
||||||
|
var entries []*SessionEntry
|
||||||
|
wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], wm, nil
|
||||||
|
}
|
||||||
|
return nil, wm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenewPeriodic is used to periodically invoke Session.Renew on a
|
||||||
|
// session until a doneCh is closed. This is meant to be used in a long running
|
||||||
|
// goroutine to ensure a session stays valid.
|
||||||
|
func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
|
||||||
|
ttl, err := time.ParseDuration(initialTTL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
waitDur := ttl / 2
|
||||||
|
lastRenewTime := time.Now()
|
||||||
|
var lastErr error
|
||||||
|
for {
|
||||||
|
if time.Since(lastRenewTime) > ttl {
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-time.After(waitDur):
|
||||||
|
entry, _, err := s.Renew(id, q)
|
||||||
|
if err != nil {
|
||||||
|
waitDur = time.Second
|
||||||
|
lastErr = err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if entry == nil {
|
||||||
|
waitDur = time.Second
|
||||||
|
lastErr = fmt.Errorf("No SessionEntry returned")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle the server updating the TTL
|
||||||
|
ttl, _ = time.ParseDuration(entry.TTL)
|
||||||
|
waitDur = ttl / 2
|
||||||
|
lastRenewTime = time.Now()
|
||||||
|
|
||||||
|
case <-doneCh:
|
||||||
|
// Attempt a session destroy
|
||||||
|
s.Destroy(id, q)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Info looks up a single session
|
||||||
|
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
|
||||||
|
var entries []*SessionEntry
|
||||||
|
qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(entries) > 0 {
|
||||||
|
return entries[0], qm, nil
|
||||||
|
}
|
||||||
|
return nil, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List gets sessions for a node
|
||||||
|
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||||
|
var entries []*SessionEntry
|
||||||
|
qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List gets all active sessions
|
||||||
|
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||||
|
var entries []*SessionEntry
|
||||||
|
qm, err := s.c.query("/v1/session/list", &entries, q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return entries, qm, nil
|
||||||
|
}
|
205
Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
generated
vendored
Normal file
205
Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
generated
vendored
Normal file
|
@ -0,0 +1,205 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSession_CreateDestroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session := c.Session()
|
||||||
|
|
||||||
|
id, meta, err := session.Create(nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if meta.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == "" {
|
||||||
|
t.Fatalf("invalid: %v", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
meta, err = session.Destroy(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if meta.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", meta)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSession_CreateRenewDestroy(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session := c.Session()
|
||||||
|
|
||||||
|
se := &SessionEntry{
|
||||||
|
TTL: "10s",
|
||||||
|
}
|
||||||
|
|
||||||
|
id, meta, err := session.Create(se, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
defer session.Destroy(id, nil)
|
||||||
|
|
||||||
|
if meta.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == "" {
|
||||||
|
t.Fatalf("invalid: %v", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
if meta.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
renew, meta, err := session.Renew(id, nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if meta.RequestTime == 0 {
|
||||||
|
t.Fatalf("bad: %v", meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
if renew == nil {
|
||||||
|
t.Fatalf("should get session")
|
||||||
|
}
|
||||||
|
|
||||||
|
if renew.ID != id {
|
||||||
|
t.Fatalf("should have matching id")
|
||||||
|
}
|
||||||
|
|
||||||
|
if renew.TTL != "10s" {
|
||||||
|
t.Fatalf("should get session with TTL")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSession_Info(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session := c.Session()
|
||||||
|
|
||||||
|
id, _, err := session.Create(nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
defer session.Destroy(id, nil)
|
||||||
|
|
||||||
|
info, qm, err := session.Info(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info == nil {
|
||||||
|
t.Fatalf("should get session")
|
||||||
|
}
|
||||||
|
if info.CreateIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.ID != id {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.Name != "" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.Node == "" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if len(info.Checks) == 0 {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.LockDelay == 0 {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.Behavior != "release" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
if info.TTL != "" {
|
||||||
|
t.Fatalf("bad: %v", info)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSession_Node(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session := c.Session()
|
||||||
|
|
||||||
|
id, _, err := session.Create(nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
defer session.Destroy(id, nil)
|
||||||
|
|
||||||
|
info, qm, err := session.Info(id, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sessions, qm, err := session.Node(info.Node, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sessions) != 1 {
|
||||||
|
t.Fatalf("bad: %v", sessions)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSession_List(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
session := c.Session()
|
||||||
|
|
||||||
|
id, _, err := session.Create(nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
defer session.Destroy(id, nil)
|
||||||
|
|
||||||
|
sessions, qm, err := session.List(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sessions) != 1 {
|
||||||
|
t.Fatalf("bad: %v", sessions)
|
||||||
|
}
|
||||||
|
|
||||||
|
if qm.LastIndex == 0 {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
if !qm.KnownLeader {
|
||||||
|
t.Fatalf("bad: %v", qm)
|
||||||
|
}
|
||||||
|
}
|
43
Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
generated
vendored
Normal file
43
Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
// Status can be used to query the Status endpoints
|
||||||
|
type Status struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns a handle to the status endpoints
|
||||||
|
func (c *Client) Status() *Status {
|
||||||
|
return &Status{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Leader is used to query for a known leader
|
||||||
|
func (s *Status) Leader() (string, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/status/leader")
|
||||||
|
_, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var leader string
|
||||||
|
if err := decodeBody(resp, &leader); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return leader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peers is used to query for a known raft peers
|
||||||
|
func (s *Status) Peers() ([]string, error) {
|
||||||
|
r := s.c.newRequest("GET", "/v1/status/peers")
|
||||||
|
_, resp, err := requireOK(s.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var peers []string
|
||||||
|
if err := decodeBody(resp, &peers); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return peers, nil
|
||||||
|
}
|
37
Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
generated
vendored
Normal file
37
Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStatusLeader(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
status := c.Status()
|
||||||
|
|
||||||
|
leader, err := status.Leader()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if leader == "" {
|
||||||
|
t.Fatalf("Expected leader")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusPeers(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
status := c.Status()
|
||||||
|
|
||||||
|
peers, err := status.Peers()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
if len(peers) == 0 {
|
||||||
|
t.Fatalf("Expected peers ")
|
||||||
|
}
|
||||||
|
}
|
20
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go
generated
vendored
20
Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go
generated
vendored
|
@ -14,6 +14,8 @@
|
||||||
package model
|
package model
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -39,8 +41,7 @@ const (
|
||||||
ReservedLabelPrefix = "__"
|
ReservedLabelPrefix = "__"
|
||||||
|
|
||||||
// MetaLabelPrefix is a prefix for labels that provide meta information.
|
// MetaLabelPrefix is a prefix for labels that provide meta information.
|
||||||
// Labels with this prefix are used for intermediate label processing and
|
// Labels with the prefix will not be attached to time series.
|
||||||
// will not be attached to time series.
|
|
||||||
MetaLabelPrefix = "__meta_"
|
MetaLabelPrefix = "__meta_"
|
||||||
|
|
||||||
// JobLabel is the label name indicating the job from which a timeseries
|
// JobLabel is the label name indicating the job from which a timeseries
|
||||||
|
@ -59,10 +60,25 @@ const (
|
||||||
QuantileLabel = "quantile"
|
QuantileLabel = "quantile"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||||
|
|
||||||
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
||||||
// therewith.
|
// therewith.
|
||||||
type LabelName string
|
type LabelName string
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var s string
|
||||||
|
if err := unmarshal(&s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !labelNameRE.MatchString(s) {
|
||||||
|
return fmt.Errorf("%q is not a valid label name", s)
|
||||||
|
}
|
||||||
|
*ln = LabelName(s)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// LabelNames is a sortable LabelName slice. In implements sort.Interface.
|
// LabelNames is a sortable LabelName slice. In implements sort.Interface.
|
||||||
type LabelNames []LabelName
|
type LabelNames []LabelName
|
||||||
|
|
||||||
|
|
6
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
generated
vendored
Normal file
6
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
# Setup a Global .gitignore for OS and editor generated files:
|
||||||
|
# https://help.github.com/articles/ignoring-files
|
||||||
|
# git config --global core.excludesfile ~/.gitignore_global
|
||||||
|
|
||||||
|
.vagrant
|
||||||
|
*.sublime-project
|
15
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
generated
vendored
Normal file
15
Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.4.1
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
- FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi
|
||||||
|
|
||||||
|
os:
|
||||||
|
- linux
|
||||||
|
- osx
|
||||||
|
|
||||||
|
notifications:
|
||||||
|
email: false
|
34
Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS
generated
vendored
Normal file
34
Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
# Names should be added to this file as
|
||||||
|
# Name or Organization <email address>
|
||||||
|
# The email address is not required for organizations.
|
||||||
|
|
||||||
|
# You can update this list using the following command:
|
||||||
|
#
|
||||||
|
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||||
|
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Adrien Bustany <adrien@bustany.org>
|
||||||
|
Caleb Spare <cespare@gmail.com>
|
||||||
|
Case Nelson <case@teammating.com>
|
||||||
|
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||||
|
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||||
|
Dave Cheney <dave@cheney.net>
|
||||||
|
Francisco Souza <f@souza.cc>
|
||||||
|
Hari haran <hariharan.uno@gmail.com>
|
||||||
|
John C Barstow
|
||||||
|
Kelvin Fo <vmirage@gmail.com>
|
||||||
|
Matt Layher <mdlayher@gmail.com>
|
||||||
|
Nathan Youngman <git@nathany.com>
|
||||||
|
Paul Hammond <paul@paulhammond.org>
|
||||||
|
Pieter Droogendijk <pieter@binky.org.uk>
|
||||||
|
Pursuit92 <JoshChase@techpursuit.net>
|
||||||
|
Rob Figueiredo <robfig@gmail.com>
|
||||||
|
Soge Zhang <zhssoge@gmail.com>
|
||||||
|
Tilak Sharma <tilaks@google.com>
|
||||||
|
Travis Cline <travis.cline@gmail.com>
|
||||||
|
Tudor Golubenco <tudor.g@gmail.com>
|
||||||
|
Yukang <moorekang@gmail.com>
|
||||||
|
bronze1man <bronze1man@gmail.com>
|
||||||
|
debrando <denis.brandolini@gmail.com>
|
||||||
|
henrikedwards <henrik.edwards@gmail.com>
|
263
Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md
generated
vendored
Normal file
263
Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md
generated
vendored
Normal file
|
@ -0,0 +1,263 @@
|
||||||
|
# Changelog
|
||||||
|
|
||||||
|
## v1.2.0 / 2015-02-08
|
||||||
|
|
||||||
|
* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||||
|
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||||
|
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59)
|
||||||
|
|
||||||
|
## v1.1.1 / 2015-02-05
|
||||||
|
|
||||||
|
* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||||
|
|
||||||
|
## v1.1.0 / 2014-12-12
|
||||||
|
|
||||||
|
* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43)
|
||||||
|
* add low-level functions
|
||||||
|
* only need to store flags on directories
|
||||||
|
* less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13)
|
||||||
|
* done can be an unbuffered channel
|
||||||
|
* remove calls to os.NewSyscallError
|
||||||
|
* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||||
|
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48)
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## v1.0.4 / 2014-09-07
|
||||||
|
|
||||||
|
* kqueue: add dragonfly to the build tags.
|
||||||
|
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||||
|
* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||||
|
|
||||||
|
## v1.0.3 / 2014-08-19
|
||||||
|
|
||||||
|
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
|
||||||
|
|
||||||
|
## v1.0.2 / 2014-08-17
|
||||||
|
|
||||||
|
* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||||
|
|
||||||
|
## v1.0.0 / 2014-08-15
|
||||||
|
|
||||||
|
* [API] Remove AddWatch on Windows, use Add.
|
||||||
|
* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
|
||||||
|
* Minor updates based on feedback from golint.
|
||||||
|
|
||||||
|
## dev / 2014-07-09
|
||||||
|
|
||||||
|
* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
|
||||||
|
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||||
|
|
||||||
|
## dev / 2014-07-04
|
||||||
|
|
||||||
|
* kqueue: fix incorrect mutex used in Close()
|
||||||
|
* Update example to demonstrate usage of Op.
|
||||||
|
|
||||||
|
## dev / 2014-06-28
|
||||||
|
|
||||||
|
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
|
||||||
|
* Fix for String() method on Event (thanks Alex Brainman)
|
||||||
|
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||||
|
|
||||||
|
## dev / 2014-06-21
|
||||||
|
|
||||||
|
* Events channel of type Event rather than *Event.
|
||||||
|
* [internal] use syscall constants directly for inotify and kqueue.
|
||||||
|
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||||
|
|
||||||
|
## dev / 2014-06-19
|
||||||
|
|
||||||
|
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||||
|
* [internal] remove cookie from Event struct (unused).
|
||||||
|
* [internal] Event struct has the same definition across every OS.
|
||||||
|
* [internal] remove internal watch and removeWatch methods.
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||||
|
* [API] Pluralized channel names: Events and Errors.
|
||||||
|
* [API] Renamed FileEvent struct to Event.
|
||||||
|
* [API] Op constants replace methods like IsCreate().
|
||||||
|
|
||||||
|
## dev / 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## dev / 2014-05-23
|
||||||
|
|
||||||
|
* [API] Remove current implementation of WatchFlags.
|
||||||
|
* current implementation doesn't take advantage of OS for efficiency
|
||||||
|
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||||
|
* no tests for the current implementation
|
||||||
|
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||||
|
|
||||||
|
## v0.9.3 / 2014-12-31
|
||||||
|
|
||||||
|
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
|
||||||
|
|
||||||
|
## v0.9.2 / 2014-08-17
|
||||||
|
|
||||||
|
* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||||
|
|
||||||
|
## v0.9.1 / 2014-06-12
|
||||||
|
|
||||||
|
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||||
|
|
||||||
|
## v0.9.0 / 2014-01-17
|
||||||
|
|
||||||
|
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||||
|
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||||
|
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||||
|
|
||||||
|
## v0.8.12 / 2013-11-13
|
||||||
|
|
||||||
|
* [API] Remove FD_SET and friends from Linux adapter
|
||||||
|
|
||||||
|
## v0.8.11 / 2013-11-02
|
||||||
|
|
||||||
|
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||||
|
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||||
|
|
||||||
|
## v0.8.10 / 2013-10-19
|
||||||
|
|
||||||
|
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||||
|
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||||
|
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||||
|
|
||||||
|
## v0.8.9 / 2013-09-08
|
||||||
|
|
||||||
|
* [Doc] Contributing (thanks @nathany)
|
||||||
|
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||||
|
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||||
|
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||||
|
|
||||||
|
## v0.8.8 / 2013-06-17
|
||||||
|
|
||||||
|
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||||
|
|
||||||
|
## v0.8.7 / 2013-06-03
|
||||||
|
|
||||||
|
* [API] Make syscall flags internal
|
||||||
|
* [Fix] inotify: ignore event changes
|
||||||
|
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||||
|
* [Fix] tests on Windows
|
||||||
|
* lower case error messages
|
||||||
|
|
||||||
|
## v0.8.6 / 2013-05-23
|
||||||
|
|
||||||
|
* kqueue: Use EVT_ONLY flag on Darwin
|
||||||
|
* [Doc] Update README with full example
|
||||||
|
|
||||||
|
## v0.8.5 / 2013-05-09
|
||||||
|
|
||||||
|
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||||
|
|
||||||
|
## v0.8.4 / 2013-04-07
|
||||||
|
|
||||||
|
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||||
|
|
||||||
|
## v0.8.3 / 2013-03-13
|
||||||
|
|
||||||
|
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||||
|
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||||
|
|
||||||
|
## v0.8.2 / 2013-02-07
|
||||||
|
|
||||||
|
* [Doc] add Authors
|
||||||
|
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||||
|
|
||||||
|
## v0.8.1 / 2013-01-09
|
||||||
|
|
||||||
|
* [Fix] Windows path separators
|
||||||
|
* [Doc] BSD License
|
||||||
|
|
||||||
|
## v0.8.0 / 2012-11-09
|
||||||
|
|
||||||
|
* kqueue: directory watching improvements (thanks @vmirage)
|
||||||
|
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||||
|
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||||
|
|
||||||
|
## v0.7.4 / 2012-10-09
|
||||||
|
|
||||||
|
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||||
|
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||||
|
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||||
|
* [Fix] kqueue: modify after recreation of file
|
||||||
|
|
||||||
|
## v0.7.3 / 2012-09-27
|
||||||
|
|
||||||
|
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||||
|
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||||
|
|
||||||
|
## v0.7.2 / 2012-09-01
|
||||||
|
|
||||||
|
* kqueue: events for created directories
|
||||||
|
|
||||||
|
## v0.7.1 / 2012-07-14
|
||||||
|
|
||||||
|
* [Fix] for renaming files
|
||||||
|
|
||||||
|
## v0.7.0 / 2012-07-02
|
||||||
|
|
||||||
|
* [Feature] FSNotify flags
|
||||||
|
* [Fix] inotify: Added file name back to event path
|
||||||
|
|
||||||
|
## v0.6.0 / 2012-06-06
|
||||||
|
|
||||||
|
* kqueue: watch files after directory created (thanks @tmc)
|
||||||
|
|
||||||
|
## v0.5.1 / 2012-05-22
|
||||||
|
|
||||||
|
* [Fix] inotify: remove all watches before Close()
|
||||||
|
|
||||||
|
## v0.5.0 / 2012-05-03
|
||||||
|
|
||||||
|
* [API] kqueue: return errors during watch instead of sending over channel
|
||||||
|
* kqueue: match symlink behavior on Linux
|
||||||
|
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||||
|
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||||
|
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||||
|
|
||||||
|
## v0.4.0 / 2012-03-30
|
||||||
|
|
||||||
|
* Go 1 released: build with go tool
|
||||||
|
* [Feature] Windows support using winfsnotify
|
||||||
|
* Windows does not have attribute change notifications
|
||||||
|
* Roll attribute notifications into IsModify
|
||||||
|
|
||||||
|
## v0.3.0 / 2012-02-19
|
||||||
|
|
||||||
|
* kqueue: add files when watch directory
|
||||||
|
|
||||||
|
## v0.2.0 / 2011-12-30
|
||||||
|
|
||||||
|
* update to latest Go weekly code
|
||||||
|
|
||||||
|
## v0.1.0 / 2011-10-19
|
||||||
|
|
||||||
|
* kqueue: add watch on file creation to match inotify
|
||||||
|
* kqueue: create file event
|
||||||
|
* inotify: ignore `IN_IGNORED` events
|
||||||
|
* event String()
|
||||||
|
* linux: common FileEvent functions
|
||||||
|
* initial commit
|
||||||
|
|
||||||
|
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||||
|
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||||
|
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||||
|
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||||
|
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||||
|
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||||
|
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||||
|
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||||
|
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||||
|
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||||
|
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||||
|
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||||
|
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||||
|
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||||
|
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||||
|
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||||
|
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||||
|
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||||
|
|
77
Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md
generated
vendored
Normal file
77
Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
## Issues
|
||||||
|
|
||||||
|
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
|
||||||
|
* Please indicate the platform you are using fsnotify on.
|
||||||
|
* A code example to reproduce the problem is appreciated.
|
||||||
|
|
||||||
|
## Pull Requests
|
||||||
|
|
||||||
|
### Contributor License Agreement
|
||||||
|
|
||||||
|
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||||
|
|
||||||
|
Please indicate that you have signed the CLA in your pull request.
|
||||||
|
|
||||||
|
### How fsnotify is Developed
|
||||||
|
|
||||||
|
* Development is done on feature branches.
|
||||||
|
* Tests are run on BSD, Linux, OS X and Windows.
|
||||||
|
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||||
|
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||||
|
* To issue a new release, the maintainers will:
|
||||||
|
* Update the CHANGELOG
|
||||||
|
* Tag a version, which will become available through gopkg.in.
|
||||||
|
|
||||||
|
### How to Fork
|
||||||
|
|
||||||
|
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||||
|
|
||||||
|
1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`)
|
||||||
|
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||||
|
3. Ensure everything works and the tests pass (see below)
|
||||||
|
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||||
|
|
||||||
|
Contribute upstream:
|
||||||
|
|
||||||
|
1. Fork fsnotify on GitHub
|
||||||
|
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||||
|
3. Push to the branch (`git push fork my-new-feature`)
|
||||||
|
4. Create a new Pull Request on GitHub
|
||||||
|
|
||||||
|
This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
|
||||||
|
fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
|
||||||
|
|
||||||
|
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||||
|
|
||||||
|
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||||
|
|
||||||
|
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||||
|
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||||
|
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||||
|
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
|
||||||
|
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||||
|
|
||||||
|
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||||
|
|
||||||
|
Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||||
|
|
||||||
|
### Maintainers
|
||||||
|
|
||||||
|
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||||
|
|
||||||
|
* Submit a pull request and sign the CLA as above.
|
||||||
|
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||||
|
|
||||||
|
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||||
|
|
||||||
|
All code changes should be internal pull requests.
|
||||||
|
|
||||||
|
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||||
|
|
||||||
|
[hub]: https://github.com/github/hub
|
||||||
|
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
28
Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE
generated
vendored
Normal file
28
Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
59
Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md
generated
vendored
Normal file
59
Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
||||||
|
# File system notifications for Go
|
||||||
|
|
||||||
|
[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1)
|
||||||
|
|
||||||
|
Go 1.3+ required.
|
||||||
|
|
||||||
|
Cross platform: Windows, Linux, BSD and OS X.
|
||||||
|
|
||||||
|
|Adapter |OS |Status |
|
||||||
|
|----------|----------|----------|
|
||||||
|
|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)|
|
||||||
|
|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)|
|
||||||
|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|
||||||
|
|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)|
|
||||||
|
|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)|
|
||||||
|
|fanotify |Linux 2.6.37+ | |
|
||||||
|
|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)|
|
||||||
|
|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)|
|
||||||
|
|
||||||
|
\* Android and iOS are untested.
|
||||||
|
|
||||||
|
Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information.
|
||||||
|
|
||||||
|
## API stability
|
||||||
|
|
||||||
|
Two major versions of fsnotify exist.
|
||||||
|
|
||||||
|
**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "gopkg.in/fsnotify.v0"
|
||||||
|
```
|
||||||
|
|
||||||
|
\* Refer to the package as fsnotify (without the .v0 suffix).
|
||||||
|
|
||||||
|
**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "gopkg.in/fsnotify.v1"
|
||||||
|
```
|
||||||
|
|
||||||
|
Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API.
|
||||||
|
|
||||||
|
**Master** may have unreleased changes. Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/go-fsnotify/fsnotify"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go).
|
||||||
|
|
||||||
|
|
||||||
|
[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
26
Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml
generated
vendored
Normal file
26
Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
## OS X build (CircleCI iOS beta)
|
||||||
|
|
||||||
|
# Pretend like it's an Xcode project, at least to get it running.
|
||||||
|
machine:
|
||||||
|
environment:
|
||||||
|
XCODE_WORKSPACE: NotUsed.xcworkspace
|
||||||
|
XCODE_SCHEME: NotUsed
|
||||||
|
# This is where the go project is actually checked out to:
|
||||||
|
CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify
|
||||||
|
|
||||||
|
dependencies:
|
||||||
|
pre:
|
||||||
|
- brew upgrade go
|
||||||
|
|
||||||
|
test:
|
||||||
|
override:
|
||||||
|
- go test ./...
|
||||||
|
|
||||||
|
# Idealized future config, eventually with cross-platform build matrix :-)
|
||||||
|
|
||||||
|
# machine:
|
||||||
|
# go:
|
||||||
|
# version: 1.4
|
||||||
|
# os:
|
||||||
|
# - osx
|
||||||
|
# - linux
|
42
Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go
generated
vendored
Normal file
42
Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
|
package fsnotify_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/go-fsnotify/fsnotify"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ExampleNewWatcher() {
|
||||||
|
watcher, err := fsnotify.NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer watcher.Close()
|
||||||
|
|
||||||
|
done := make(chan bool)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case event := <-watcher.Events:
|
||||||
|
log.Println("event:", event)
|
||||||
|
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||||
|
log.Println("modified file:", event.Name)
|
||||||
|
}
|
||||||
|
case err := <-watcher.Errors:
|
||||||
|
log.Println("error:", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = watcher.Add("/tmp/foo")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
}
|
62
Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go
generated
vendored
Normal file
62
Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !plan9,!solaris
|
||||||
|
|
||||||
|
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event represents a single file system notification.
|
||||||
|
type Event struct {
|
||||||
|
Name string // Relative path to the file or directory.
|
||||||
|
Op Op // File operation that triggered the event.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Op describes a set of file operations.
|
||||||
|
type Op uint32
|
||||||
|
|
||||||
|
// These are the generalized file operations that can trigger a notification.
|
||||||
|
const (
|
||||||
|
Create Op = 1 << iota
|
||||||
|
Write
|
||||||
|
Remove
|
||||||
|
Rename
|
||||||
|
Chmod
|
||||||
|
)
|
||||||
|
|
||||||
|
// String returns a string representation of the event in the form
|
||||||
|
// "file: REMOVE|WRITE|..."
|
||||||
|
func (e Event) String() string {
|
||||||
|
// Use a buffer for efficient string concatenation
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
|
||||||
|
if e.Op&Create == Create {
|
||||||
|
buffer.WriteString("|CREATE")
|
||||||
|
}
|
||||||
|
if e.Op&Remove == Remove {
|
||||||
|
buffer.WriteString("|REMOVE")
|
||||||
|
}
|
||||||
|
if e.Op&Write == Write {
|
||||||
|
buffer.WriteString("|WRITE")
|
||||||
|
}
|
||||||
|
if e.Op&Rename == Rename {
|
||||||
|
buffer.WriteString("|RENAME")
|
||||||
|
}
|
||||||
|
if e.Op&Chmod == Chmod {
|
||||||
|
buffer.WriteString("|CHMOD")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If buffer remains empty, return no event names
|
||||||
|
if buffer.Len() == 0 {
|
||||||
|
return fmt.Sprintf("%q: ", e.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a list of event names, with leading pipe character stripped
|
||||||
|
return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
|
||||||
|
}
|
306
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go
generated
vendored
Normal file
306
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
fd int
|
||||||
|
poller *fdPoller
|
||||||
|
watches map[string]*watch // Map of inotify watches (key: path)
|
||||||
|
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||||
|
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||||
|
doneResp chan struct{} // Channel to respond to Close
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
// Create inotify fd
|
||||||
|
fd, errno := syscall.InotifyInit()
|
||||||
|
if fd == -1 {
|
||||||
|
return nil, errno
|
||||||
|
}
|
||||||
|
// Create epoll
|
||||||
|
poller, err := newFdPoller(fd)
|
||||||
|
if err != nil {
|
||||||
|
syscall.Close(fd)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
fd: fd,
|
||||||
|
poller: poller,
|
||||||
|
watches: make(map[string]*watch),
|
||||||
|
paths: make(map[int]string),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
doneResp: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) isClosed() bool {
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||||
|
close(w.done)
|
||||||
|
|
||||||
|
// Wake up goroutine
|
||||||
|
w.poller.wake()
|
||||||
|
|
||||||
|
// Wait for goroutine to close
|
||||||
|
<-w.doneResp
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
if w.isClosed() {
|
||||||
|
return errors.New("inotify instance already closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
|
||||||
|
syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
|
||||||
|
syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF
|
||||||
|
|
||||||
|
var flags uint32 = agnosticEvents
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
watchEntry, found := w.watches[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if found {
|
||||||
|
watchEntry.flags |= flags
|
||||||
|
flags |= syscall.IN_MASK_ADD
|
||||||
|
}
|
||||||
|
wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
|
||||||
|
if wd == -1 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||||
|
w.paths[wd] = name
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
name = filepath.Clean(name)
|
||||||
|
|
||||||
|
// Fetch the watch.
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
watch, ok := w.watches[name]
|
||||||
|
|
||||||
|
// Remove it from inotify.
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||||
|
}
|
||||||
|
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||||
|
// the inotify will already have been removed.
|
||||||
|
// That means we can safely delete it from our watches, whatever inotify_rm_watch does.
|
||||||
|
delete(w.watches, name)
|
||||||
|
success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
|
||||||
|
if success == -1 {
|
||||||
|
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||||
|
// the only two possible errors are:
|
||||||
|
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||||
|
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||||
|
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||||
|
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||||
|
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the inotify file descriptor, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||||
|
n int // Number of bytes read with read()
|
||||||
|
errno error // Syscall errno
|
||||||
|
ok bool // For poller.wait
|
||||||
|
)
|
||||||
|
|
||||||
|
defer close(w.doneResp)
|
||||||
|
defer close(w.Errors)
|
||||||
|
defer close(w.Events)
|
||||||
|
defer syscall.Close(w.fd)
|
||||||
|
defer w.poller.close()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// See if we have been closed.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ok, errno = w.poller.wait()
|
||||||
|
if errno != nil {
|
||||||
|
select {
|
||||||
|
case w.Errors <- errno:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
n, errno = syscall.Read(w.fd, buf[:])
|
||||||
|
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||||
|
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||||
|
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||||
|
if errno == syscall.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// syscall.Read might have been woken up by Close. If so, we're done.
|
||||||
|
if w.isClosed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < syscall.SizeofInotifyEvent {
|
||||||
|
var err error
|
||||||
|
if n == 0 {
|
||||||
|
// If EOF is received. This should really never happen.
|
||||||
|
err = io.EOF
|
||||||
|
} else if n < 0 {
|
||||||
|
// If an error occured while reading.
|
||||||
|
err = errno
|
||||||
|
} else {
|
||||||
|
// Read was too short.
|
||||||
|
err = errors.New("notify: short read in readEvents()")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case w.Errors <- err:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
// We don't know how many events we just read into the buffer
|
||||||
|
// While the offset points to at least one whole event...
|
||||||
|
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||||
|
|
||||||
|
mask := uint32(raw.Mask)
|
||||||
|
nameLen := uint32(raw.Len)
|
||||||
|
// If the event happened to the watched directory or the watched file, the kernel
|
||||||
|
// doesn't append the filename to the event, but we would like to always fill the
|
||||||
|
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||||
|
// the "paths" map.
|
||||||
|
w.mu.Lock()
|
||||||
|
name := w.paths[int(raw.Wd)]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if nameLen > 0 {
|
||||||
|
// Point "bytes" at the first byte of the filename
|
||||||
|
bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
|
||||||
|
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||||
|
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||||
|
}
|
||||||
|
|
||||||
|
event := newEvent(name, mask)
|
||||||
|
|
||||||
|
// Send the events that are not ignored on the events channel
|
||||||
|
if !event.ignoreLinux(mask) {
|
||||||
|
select {
|
||||||
|
case w.Events <- event:
|
||||||
|
case <-w.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
offset += syscall.SizeofInotifyEvent + nameLen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Certain types of events can be "ignored" and not sent over the Events
|
||||||
|
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||||
|
// against files that do not exist.
|
||||||
|
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||||
|
// Ignore anything the inotify API says to ignore
|
||||||
|
if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the event is not a DELETE or RENAME, the file must exist.
|
||||||
|
// Otherwise the event is ignored.
|
||||||
|
// *Note*: this was put in place because it was seen that a MODIFY
|
||||||
|
// event was sent after the DELETE. This ignores that MODIFY and
|
||||||
|
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||||
|
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||||
|
_, statErr := os.Lstat(e.Name)
|
||||||
|
return os.IsNotExist(statErr)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
186
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
generated
vendored
Normal file
186
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go
generated
vendored
Normal file
|
@ -0,0 +1,186 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fdPoller struct {
|
||||||
|
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||||
|
epfd int // Epoll file descriptor
|
||||||
|
pipe [2]int // Pipe for waking up
|
||||||
|
}
|
||||||
|
|
||||||
|
func emptyPoller(fd int) *fdPoller {
|
||||||
|
poller := new(fdPoller)
|
||||||
|
poller.fd = fd
|
||||||
|
poller.epfd = -1
|
||||||
|
poller.pipe[0] = -1
|
||||||
|
poller.pipe[1] = -1
|
||||||
|
return poller
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
//
// On any error the deferred cleanup below closes whatever descriptors
// were opened so far; only fd itself (owned by the caller) is left open.
func newFdPoller(fd int) (*fdPoller, error) {
	var errno error
	poller := emptyPoller(fd)
	// errno is inspected by the deferred closure, so every failing step
	// must assign to it (not shadow it with :=).
	defer func() {
		if errno != nil {
			poller.close()
		}
	}()
	poller.fd = fd

	// Create epoll fd
	poller.epfd, errno = syscall.EpollCreate(1)
	if poller.epfd == -1 {
		return nil, errno
	}
	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	// O_NONBLOCK so wake()/clearWake() never block on a full/empty pipe.
	errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK)
	if errno != nil {
		return nil, errno
	}

	// Register inotify fd with epoll
	event := syscall.EpollEvent{
		Fd:     int32(poller.fd),
		Events: syscall.EPOLLIN,
	}
	errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event)
	if errno != nil {
		return nil, errno
	}

	// Register pipe fd with epoll
	event = syscall.EpollEvent{
		Fd:     int32(poller.pipe[0]),
		Events: syscall.EPOLLIN,
	}
	errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event)
	if errno != nil {
		return nil, errno
	}

	return poller, nil
}
|
||||||
|
|
||||||
|
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
//
// "Ready" covers real data (EPOLLIN), an error condition (EPOLLERR), or a
// hangup (EPOLLHUP) on the inotify fd; a plain wakeup via the pipe drains
// the pipe and returns false.
func (poller *fdPoller) wait() (bool, error) {
	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
	// I don't know whether epoll_wait returns the number of events returned,
	// or the total number of events ready.
	// I decided to catch both by making the buffer one larger than the maximum.
	events := make([]syscall.EpollEvent, 7)
	for {
		// Block indefinitely (-1 timeout); EINTR just restarts the wait.
		n, errno := syscall.EpollWait(poller.epfd, events, -1)
		if n == -1 {
			if errno == syscall.EINTR {
				continue
			}
			return false, errno
		}
		if n == 0 {
			// If there are no events, try again.
			continue
		}
		if n > 6 {
			// This should never happen. More events were returned than should be possible.
			return false, errors.New("epoll_wait returned more events than I know what to do with")
		}
		ready := events[:n]
		epollhup := false
		epollerr := false
		epollin := false
		for _, event := range ready {
			if event.Fd == int32(poller.fd) {
				if event.Events&syscall.EPOLLHUP != 0 {
					// This should not happen, but if it does, treat it as a wakeup.
					epollhup = true
				}
				if event.Events&syscall.EPOLLERR != 0 {
					// If an error is waiting on the file descriptor, we should pretend
					// something is ready to read, and let syscall.Read pick up the error.
					epollerr = true
				}
				if event.Events&syscall.EPOLLIN != 0 {
					// There is data to read.
					epollin = true
				}
			}
			if event.Fd == int32(poller.pipe[0]) {
				if event.Events&syscall.EPOLLHUP != 0 {
					// Write pipe descriptor was closed, by us. This means we're closing down the
					// watcher, and we should wake up.
					// NOTE(review): this branch intentionally sets no flag, so a bare
					// pipe HUP makes wait() return false — confirm that the shutdown
					// path does not rely on a true result here.
				}
				if event.Events&syscall.EPOLLERR != 0 {
					// If an error is waiting on the pipe file descriptor.
					// This is an absolute mystery, and should never ever happen.
					return false, errors.New("Error on the pipe descriptor.")
				}
				if event.Events&syscall.EPOLLIN != 0 {
					// This is a regular wakeup, so we have to clear the buffer.
					err := poller.clearWake()
					if err != nil {
						return false, err
					}
				}
			}
		}

		if epollhup || epollerr || epollin {
			return true, nil
		}
		return false, nil
	}
}
|
||||||
|
|
||||||
|
// Close the write end of the poller.
|
||||||
|
func (poller *fdPoller) wake() error {
|
||||||
|
buf := make([]byte, 1)
|
||||||
|
n, errno := syscall.Write(poller.pipe[1], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == syscall.EAGAIN {
|
||||||
|
// Buffer is full, poller will wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (poller *fdPoller) clearWake() error {
|
||||||
|
// You have to be woken up a LOT in order to get to 100!
|
||||||
|
buf := make([]byte, 100)
|
||||||
|
n, errno := syscall.Read(poller.pipe[0], buf)
|
||||||
|
if n == -1 {
|
||||||
|
if errno == syscall.EAGAIN {
|
||||||
|
// Buffer is empty, someone else cleared our wake.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close all poller file descriptors, but not the one passed to it.
|
||||||
|
func (poller *fdPoller) close() {
|
||||||
|
if poller.pipe[1] != -1 {
|
||||||
|
syscall.Close(poller.pipe[1])
|
||||||
|
}
|
||||||
|
if poller.pipe[0] != -1 {
|
||||||
|
syscall.Close(poller.pipe[0])
|
||||||
|
}
|
||||||
|
if poller.epfd != -1 {
|
||||||
|
syscall.Close(poller.epfd)
|
||||||
|
}
|
||||||
|
}
|
228
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
generated
vendored
Normal file
228
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go
generated
vendored
Normal file
|
@ -0,0 +1,228 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testFd wraps a pipe used as a stand-in for an inotify descriptor in
// the poller tests: element 0 is the read end (handed to the poller),
// element 1 is the write end (used to make the read end readable).
type testFd [2]int

// makeTestFd creates the test pipe, failing the test on error.
func makeTestFd(t *testing.T) testFd {
	var tfd testFd
	errno := syscall.Pipe(tfd[:])
	if errno != nil {
		t.Fatalf("Failed to create pipe: %v", errno)
	}
	return tfd
}

// fd returns the read end — the descriptor that plays the inotify role.
func (tfd testFd) fd() int {
	return tfd[0]
}

// closeWrite closes the write end, which makes the read end report a
// hangup to epoll.
func (tfd testFd) closeWrite(t *testing.T) {
	errno := syscall.Close(tfd[1])
	if errno != nil {
		t.Fatalf("Failed to close write end of pipe: %v", errno)
	}
}

// put writes 10 bytes into the pipe, making the read end readable.
func (tfd testFd) put(t *testing.T) {
	buf := make([]byte, 10)
	_, errno := syscall.Write(tfd[1], buf)
	if errno != nil {
		t.Fatalf("Failed to write to pipe: %v", errno)
	}
}

// get drains up to 10 bytes from the read end.
func (tfd testFd) get(t *testing.T) {
	buf := make([]byte, 10)
	_, errno := syscall.Read(tfd[0], buf)
	if errno != nil {
		t.Fatalf("Failed to read from pipe: %v", errno)
	}
}

// close closes both ends of the pipe; errors are ignored (cleanup only).
func (tfd testFd) close() {
	syscall.Close(tfd[1])
	syscall.Close(tfd[0])
}

// makePoller builds a pipe-backed fdPoller fixture for a test.
func makePoller(t *testing.T) (testFd, *fdPoller) {
	tfd := makeTestFd(t)
	poller, err := newFdPoller(tfd.fd())
	if err != nil {
		t.Fatalf("Failed to create poller: %v", err)
	}
	return tfd, poller
}
|
||||||
|
|
||||||
|
// TestPollerWithBadFd verifies that building a poller around an invalid
// descriptor surfaces EBADF from the epoll registration.
func TestPollerWithBadFd(t *testing.T) {
	_, err := newFdPoller(-1)
	if err != syscall.EBADF {
		t.Fatalf("Expected EBADF, got: %v", err)
	}
}

// TestPollerWithData: wait() must report readiness once the watched fd
// has buffered data.
func TestPollerWithData(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.put(t)
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
	tfd.get(t)
}

// TestPollerWithWakeup: a wake() with no data pending must make wait()
// return false (woken, nothing to read).
func TestPollerWithWakeup(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if ok {
		t.Fatalf("expected poller to return false")
	}
}

// TestPollerWithClose: closing the write end of the watched pipe (HUP on
// the poller's fd) must count as readiness.
func TestPollerWithClose(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.closeWrite(t)
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}
}

// TestPollerWithWakeupAndData walks through the combinations of pending
// data and pending wakeups: data wins, wakeups are cleared as a side
// effect of wait().
func TestPollerWithWakeupAndData(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	tfd.put(t)
	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}

	// both data and wakeup
	ok, err := poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}

	// data is still in the buffer, wakeup is cleared
	ok, err = poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if !ok {
		t.Fatalf("expected poller to return true")
	}

	tfd.get(t)
	// data is gone, only wakeup now
	err = poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	ok, err = poller.wait()
	if err != nil {
		t.Fatalf("poller failed: %v", err)
	}
	if ok {
		t.Fatalf("expected poller to return false")
	}
}

// TestPollerConcurrent drives wait() from a dedicated goroutine and
// checks, with 50ms grace periods, that it blocks until data, a wakeup,
// or a close arrives.
// NOTE(review): t.Fatalf is called from a non-test goroutine below; Go's
// testing package documents that Fatal/Fatalf must run in the test
// goroutine — confirm the intended failure behavior.
func TestPollerConcurrent(t *testing.T) {
	tfd, poller := makePoller(t)
	defer tfd.close()
	defer poller.close()

	oks := make(chan bool)
	live := make(chan bool)
	defer close(live)
	go func() {
		defer close(oks)
		for {
			ok, err := poller.wait()
			if err != nil {
				t.Fatalf("poller failed: %v", err)
			}
			oks <- ok
			if !<-live {
				return
			}
		}
	}()

	// Try a write
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	tfd.put(t)
	if !<-oks {
		t.Fatalf("expected true")
	}
	tfd.get(t)
	live <- true

	// Try a wakeup
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	err := poller.wake()
	if err != nil {
		t.Fatalf("wake failed: %v", err)
	}
	if <-oks {
		t.Fatalf("expected false")
	}
	live <- true

	// Try a close
	select {
	case <-time.After(50 * time.Millisecond):
	case <-oks:
		t.Fatalf("poller did not wait")
	}
	tfd.closeWrite(t)
	if !<-oks {
		t.Fatalf("expected true")
	}
	tfd.get(t)
}
|
292
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go
generated
vendored
Normal file
292
Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go
generated
vendored
Normal file
|
@ -0,0 +1,292 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestInotifyCloseRightAway closes the watcher before its reader
// goroutine has performed a single read, then verifies full shutdown.
func TestInotifyCloseRightAway(t *testing.T) {
	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}

	// Close immediately; it won't even reach the first syscall.Read.
	w.Close()

	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}

// TestInotifyCloseSlightlyLater closes the watcher once the reader
// goroutine is presumably blocked in its read.
func TestInotifyCloseSlightlyLater(t *testing.T) {
	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}

	// Wait until readEvents has reached syscall.Read, and Close.
	<-time.After(50 * time.Millisecond)
	w.Close()

	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}

// TestInotifyCloseSlightlyLaterWithWatch is the same as above, but with
// an active watch on a temporary directory when Close is called.
func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}
	w.Add(testDir)

	// Wait until readEvents has reached syscall.Read, and Close.
	<-time.After(50 * time.Millisecond)
	w.Close()

	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}
|
||||||
|
|
||||||
|
// TestInotifyCloseAfterRead generates one event so the reader goroutine
// has completed at least one read cycle, then closes the watcher and
// verifies shutdown.
func TestInotifyCloseAfterRead(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher")
	}

	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add .")
	}

	// Generate an event.
	os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))

	// Wait for readEvents to read the event, then close the watcher.
	<-time.After(50 * time.Millisecond)
	w.Close()

	// Wait for the close to complete.
	<-time.After(50 * time.Millisecond)
	isWatcherReallyClosed(t, w)
}

// isWatcherReallyClosed asserts that both watcher channels have been
// closed (i.e. the reader goroutine has exited). A blocked or open
// channel means readEvents is still alive.
func isWatcherReallyClosed(t *testing.T, w *Watcher) {
	select {
	case err, ok := <-w.Errors:
		if ok {
			t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
		}
	default:
		t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
	}

	select {
	case _, ok := <-w.Events:
		if ok {
			t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
		}
	default:
		t.Fatalf("w.Events would have blocked; readEvents is still alive!")
	}
}
|
||||||
|
|
||||||
|
// TestInotifyCloseCreate checks that a watcher can be closed and a fresh
// one created while the old reader goroutine is mid-read, i.e. the file
// descriptor can be swapped out from under it safely.
func TestInotifyCloseCreate(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add testDir: %v", err)
	}
	h, err := os.Create(filepath.Join(testDir, "testfile"))
	if err != nil {
		t.Fatalf("Failed to create file in testdir: %v", err)
	}
	h.Close()
	select {
	case _ = <-w.Events:
	case err := <-w.Errors:
		t.Fatalf("Error from watcher: %v", err)
	case <-time.After(50 * time.Millisecond):
		t.Fatalf("Took too long to wait for event")
	}

	// At this point, we've received one event, so the goroutine is ready.
	// It's also blocking on syscall.Read.
	// Now we try to swap the file descriptor under its nose.
	w.Close()
	w, err = NewWatcher()
	// NOTE(review): this defer runs before err is checked; if NewWatcher
	// failed, w would be nil here and the deferred Close would panic —
	// confirm whether the defer should follow the error check.
	defer w.Close()
	if err != nil {
		t.Fatalf("Failed to create second watcher: %v", err)
	}

	<-time.After(50 * time.Millisecond)
	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Error adding testDir again: %v", err)
	}
}
|
||||||
|
|
||||||
|
// TestInotifyStress hammers a watched directory with create/remove
// cycles for 5 seconds while two other goroutines inject noise: one
// sends SIGUSR1 to the process (to force EINTR in the reader's syscalls)
// and one fires spurious poller wakeups. It then checks that the
// observed create/remove counts are consistent.
// NOTE(review): t.Fatalf is called from non-test goroutines below, which
// the testing package does not support — confirm the failure behavior.
func TestInotifyStress(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)
	testFile := filepath.Join(testDir, "testfile")

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	killchan := make(chan struct{})
	defer close(killchan)

	err = w.Add(testDir)
	if err != nil {
		t.Fatalf("Failed to add testDir: %v", err)
	}

	proc, err := os.FindProcess(os.Getpid())
	if err != nil {
		t.Fatalf("Error finding process: %v", err)
	}

	// Signal storm: provoke EINTR in the reader's blocking syscalls.
	go func() {
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				err := proc.Signal(syscall.SIGUSR1)
				if err != nil {
					t.Fatalf("Signal failed: %v", err)
				}
			case <-killchan:
				return
			}
		}
	}()

	// Spurious wakeups on the watcher's internal poller.
	go func() {
		for {
			select {
			case <-time.After(11 * time.Millisecond):
				err := w.poller.wake()
				if err != nil {
					t.Fatalf("Wake failed: %v", err)
				}
			case <-killchan:
				return
			}
		}
	}()

	// Continuous create/remove churn on the watched file.
	go func() {
		for {
			select {
			case <-killchan:
				return
			default:
				handle, err := os.Create(testFile)
				if err != nil {
					t.Fatalf("Create failed: %v", err)
				}
				handle.Close()
				time.Sleep(time.Millisecond)
				err = os.Remove(testFile)
				if err != nil {
					t.Fatalf("Remove failed: %v", err)
				}
			}
		}
	}()

	// Tally events for 5 seconds, then assert the counts are sane:
	// creates and removes may differ by at most one (a cycle in flight).
	creates := 0
	removes := 0
	after := time.After(5 * time.Second)
	for {
		select {
		case <-after:
			if creates-removes > 1 || creates-removes < -1 {
				t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
			}
			if creates < 50 {
				t.Fatalf("Expected at least 50 creates, got %d", creates)
			}
			return
		case err := <-w.Errors:
			t.Fatalf("Got an error from watcher: %v", err)
		case evt := <-w.Events:
			if evt.Name != testFile {
				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
			}
			if evt.Op == Create {
				creates++
			}
			if evt.Op == Remove {
				removes++
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestInotifyRemoveTwice deletes a watched file out from under the
// watcher, then calls Remove twice: the first must fail with EINVAL
// (the kernel watch is gone) while still dropping the internal state, so
// the second must NOT fail with EINVAL again.
func TestInotifyRemoveTwice(t *testing.T) {
	testDir := tempMkdir(t)
	defer os.RemoveAll(testDir)
	testFile := filepath.Join(testDir, "testfile")

	handle, err := os.Create(testFile)
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	handle.Close()

	w, err := NewWatcher()
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer w.Close()

	err = w.Add(testFile)
	if err != nil {
		t.Fatalf("Failed to add testFile: %v", err)
	}

	// Delete the file on disk while it is still watched.
	err = os.Remove(testFile)
	if err != nil {
		t.Fatalf("Failed to remove testFile: %v", err)
	}

	err = w.Remove(testFile)
	if err != syscall.EINVAL {
		t.Fatalf("Expected EINVAL from Remove, got: %v", err)
	}

	err = w.Remove(testFile)
	if err == syscall.EINVAL {
		t.Fatalf("Got EINVAL again, watch was not removed")
	}
}
|
1135
Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
generated
vendored
Normal file
1135
Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
463
Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go
generated
vendored
Normal file
463
Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go
generated
vendored
Normal file
|
@ -0,0 +1,463 @@
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
// This is the kqueue-based implementation (BSD/Darwin).
type Watcher struct {
	Events chan Event
	Errors chan error
	done   chan bool // Channel for sending a "quit message" to the reader goroutine

	kq int // File descriptor (as returned by the kqueue() syscall).

	mu              sync.Mutex        // Protects access to watcher data
	watches         map[string]int    // Map of watched file descriptors (key: path).
	externalWatches map[string]bool   // Map of watches added by user of the library.
	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
	isClosed        bool              // Set to true when Close() is first called
}

// pathInfo pairs a watched path with whether it is a directory; keyed by
// the open file descriptor in Watcher.paths.
type pathInfo struct {
	name  string
	isDir bool
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
kq, err := kqueue()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w := &Watcher{
|
||||||
|
kq: kq,
|
||||||
|
watches: make(map[string]int),
|
||||||
|
dirFlags: make(map[string]uint32),
|
||||||
|
paths: make(map[int]pathInfo),
|
||||||
|
fileExists: make(map[string]bool),
|
||||||
|
externalWatches: make(map[string]bool),
|
||||||
|
Events: make(chan Event),
|
||||||
|
Errors: make(chan error),
|
||||||
|
done: make(chan bool),
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
// Safe to call more than once; only the first call does any work.
func (w *Watcher) Close() error {
	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return nil
	}
	w.isClosed = true
	w.mu.Unlock()

	// Snapshot the watch set so Remove can take the lock itself.
	w.mu.Lock()
	ws := w.watches
	w.mu.Unlock()

	// NOTE(review): the first Remove error is collected here but the
	// function unconditionally returns nil below — confirm whether err
	// was meant to be returned.
	var err error
	for name := range ws {
		if e := w.Remove(name); e != nil && err == nil {
			err = e
		}
	}

	// Send "quit" message to the reader goroutine:
	w.done <- true

	return nil
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
w.mu.Lock()
|
||||||
|
w.externalWatches[name] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
return w.addWatch(name, noteAllEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the named file or directory (non-recursively).
// For a directory it also drops any implicit (non-user-added) watches on
// entries directly inside it.
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)
	w.mu.Lock()
	watchfd, ok := w.watches[name]
	w.mu.Unlock()
	if !ok {
		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
	}

	// Deregister from kqueue before closing the descriptor.
	const registerRemove = syscall.EV_DELETE
	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
		return err
	}

	syscall.Close(watchfd)

	w.mu.Lock()
	isDir := w.paths[watchfd].isDir
	delete(w.watches, name)
	delete(w.paths, watchfd)
	delete(w.dirFlags, name)
	w.mu.Unlock()

	// Find all watched paths that are in this directory that are not external.
	if isDir {
		var pathsToRemove []string
		w.mu.Lock()
		for _, path := range w.paths {
			wdir, _ := filepath.Split(path.name)
			if filepath.Clean(wdir) == name {
				if !w.externalWatches[path.name] {
					pathsToRemove = append(pathsToRemove, path.name)
				}
			}
		}
		w.mu.Unlock()
		for _, name := range pathsToRemove {
			// Since these are internal, not much sense in propagating error
			// to the user, as that will just confuse them with an error about
			// a path they did not explicitly watch themselves.
			w.Remove(name)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE).
// This is the fflags mask used for watches added via Add.
const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME

// keventWaitTime to block on each read from kevent; bounded so the
// reader loop can periodically check the done channel.
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||||
|
|
||||||
|
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Used by both Add (user watches) and internal directory scanning;
// re-adding an existing watch only updates its flags.
func (w *Watcher) addWatch(name string, flags uint32) error {
	var isDir bool
	// Make ./name and name equivalent
	name = filepath.Clean(name)

	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return errors.New("kevent instance already closed")
	}
	watchfd, alreadyWatching := w.watches[name]
	// We already have a watch, but we can still override flags.
	if alreadyWatching {
		isDir = w.paths[watchfd].isDir
	}
	w.mu.Unlock()

	if !alreadyWatching {
		fi, err := os.Lstat(name)
		if err != nil {
			return err
		}

		// Don't watch sockets.
		if fi.Mode()&os.ModeSocket == os.ModeSocket {
			return nil
		}

		// Follow Symlinks
		// Unfortunately, Linux can add bogus symlinks to watch list without
		// issue, and Windows can't do symlinks period (AFAIK). To maintain
		// consistency, we will act like everything is fine. There will simply
		// be no file events for broken symlinks.
		// Hence the returns of nil on errors.
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			name, err = filepath.EvalSymlinks(name)
			if err != nil {
				return nil
			}

			fi, err = os.Lstat(name)
			if err != nil {
				return nil
			}
		}

		// kqueue watches the open descriptor, so keep it open for the
		// lifetime of the watch (closed again in Remove).
		watchfd, err = syscall.Open(name, openMode, 0700)
		if watchfd == -1 {
			return err
		}

		isDir = fi.IsDir()
	}

	const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
		syscall.Close(watchfd)
		return err
	}

	if !alreadyWatching {
		w.mu.Lock()
		w.watches[name] = watchfd
		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
		w.mu.Unlock()
	}

	if isDir {
		// Watch the directory if it has not been watched before,
		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
		w.mu.Lock()
		watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
			(!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
		// Store flags so this watch can be updated later
		w.dirFlags[name] = flags
		w.mu.Unlock()

		if watchDir {
			if err := w.watchDirectoryFiles(name); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// readEvents reads from kqueue and converts the received kevents into
|
||||||
|
// Event values that it sends down the Events channel.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
eventBuffer := make([]syscall.Kevent_t, 10)
|
||||||
|
|
||||||
|
for {
|
||||||
|
// See if there is a message on the "done" channel
|
||||||
|
select {
|
||||||
|
case <-w.done:
|
||||||
|
err := syscall.Close(w.kq)
|
||||||
|
if err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get new events
|
||||||
|
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||||
|
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||||
|
if err != nil && err != syscall.EINTR {
|
||||||
|
w.Errors <- err
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush the events we received to the Events channel
|
||||||
|
for len(kevents) > 0 {
|
||||||
|
kevent := &kevents[0]
|
||||||
|
watchfd := int(kevent.Ident)
|
||||||
|
mask := uint32(kevent.Fflags)
|
||||||
|
w.mu.Lock()
|
||||||
|
path := w.paths[watchfd]
|
||||||
|
w.mu.Unlock()
|
||||||
|
event := newEvent(path.name, mask)
|
||||||
|
|
||||||
|
if path.isDir && !(event.Op&Remove == Remove) {
|
||||||
|
// Double check to make sure the directory exists. This can happen when
|
||||||
|
// we do a rm -fr on a recursively watched folders and we receive a
|
||||||
|
// modification event first but the folder has been deleted and later
|
||||||
|
// receive the delete event
|
||||||
|
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||||
|
// mark is as delete event
|
||||||
|
event.Op |= Remove
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||||
|
w.Remove(event.Name)
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.fileExists, event.Name)
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||||
|
w.sendDirectoryChangeEvents(event.Name)
|
||||||
|
} else {
|
||||||
|
// Send the event on the Events channel
|
||||||
|
w.Events <- event
|
||||||
|
}
|
||||||
|
|
||||||
|
if event.Op&Remove == Remove {
|
||||||
|
// Look for a file that may have overwritten this.
|
||||||
|
// For example, mv f1 f2 will delete f2, then create f2.
|
||||||
|
fileDir, _ := filepath.Split(event.Name)
|
||||||
|
fileDir = filepath.Clean(fileDir)
|
||||||
|
w.mu.Lock()
|
||||||
|
_, found := w.watches[fileDir]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if found {
|
||||||
|
// make sure the directory exists before we watch for changes. When we
|
||||||
|
// do a recursive watch and perform rm -fr, the parent directory might
|
||||||
|
// have gone missing, ignore the missing directory and let the
|
||||||
|
// upcoming delete event remove the watch from the parent directory.
|
||||||
|
if _, err := os.Lstat(fileDir); os.IsExist(err) {
|
||||||
|
w.sendDirectoryChangeEvents(fileDir)
|
||||||
|
// FIXME: should this be for events on files or just isDir?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to next event
|
||||||
|
kevents = kevents[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCreateEvent(name string) Event {
|
||||||
|
return Event{Name: name, Op: Create}
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||||
|
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
if err := w.internalWatch(filePath, fileInfo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendDirectoryEvents searches the directory for newly created files
|
||||||
|
// and sends them over the event channel. This functionality is to have
|
||||||
|
// the BSD version of fsnotify match Linux inotify which provides a
|
||||||
|
// create event for files created in a watched directory.
|
||||||
|
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||||
|
// Get all files
|
||||||
|
files, err := ioutil.ReadDir(dirPath)
|
||||||
|
if err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search for new files
|
||||||
|
for _, fileInfo := range files {
|
||||||
|
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||||
|
w.mu.Lock()
|
||||||
|
_, doesExist := w.fileExists[filePath]
|
||||||
|
w.mu.Unlock()
|
||||||
|
if !doesExist {
|
||||||
|
// Send create event
|
||||||
|
w.Events <- newCreateEvent(filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||||
|
if err := w.internalWatch(filePath, fileInfo); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.mu.Lock()
|
||||||
|
w.fileExists[filePath] = true
|
||||||
|
w.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
|
||||||
|
if fileInfo.IsDir() {
|
||||||
|
// mimic Linux providing delete events for subdirectories
|
||||||
|
// but preserve the flags used if currently watching subdirectory
|
||||||
|
w.mu.Lock()
|
||||||
|
flags := w.dirFlags[name]
|
||||||
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
flags |= syscall.NOTE_DELETE
|
||||||
|
return w.addWatch(name, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// watch file to mimic Linux inotify
|
||||||
|
return w.addWatch(name, noteAllEvents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||||
|
func kqueue() (kq int, err error) {
|
||||||
|
kq, err = syscall.Kqueue()
|
||||||
|
if kq == -1 {
|
||||||
|
return kq, err
|
||||||
|
}
|
||||||
|
return kq, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// register events with the queue
|
||||||
|
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||||
|
changes := make([]syscall.Kevent_t, len(fds))
|
||||||
|
|
||||||
|
for i, fd := range fds {
|
||||||
|
// SetKevent converts int to the platform-specific types:
|
||||||
|
syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags)
|
||||||
|
changes[i].Fflags = fflags
|
||||||
|
}
|
||||||
|
|
||||||
|
// register the events
|
||||||
|
success, err := syscall.Kevent(kq, changes, nil, nil)
|
||||||
|
if success == -1 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// read retrieves pending events, or waits until an event occurs.
|
||||||
|
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||||
|
func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) {
|
||||||
|
n, err := syscall.Kevent(kq, nil, events, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return events[0:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// durationToTimespec prepares a timeout value
|
||||||
|
func durationToTimespec(d time.Duration) syscall.Timespec {
|
||||||
|
return syscall.NsecToTimespec(d.Nanoseconds())
|
||||||
|
}
|
11
Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go
generated
vendored
Normal file
11
Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build freebsd openbsd netbsd dragonfly
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
|
12
Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go
generated
vendored
Normal file
12
Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// note: this constant is not defined on BSD
|
||||||
|
const openMode = syscall.O_EVTONLY
|
561
Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go
generated
vendored
Normal file
561
Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go
generated
vendored
Normal file
|
@ -0,0 +1,561 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package fsnotify
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Watcher watches a set of files, delivering events to a channel.
|
||||||
|
type Watcher struct {
|
||||||
|
Events chan Event
|
||||||
|
Errors chan error
|
||||||
|
isClosed bool // Set to true when Close() is first called
|
||||||
|
mu sync.Mutex // Map access
|
||||||
|
port syscall.Handle // Handle to completion port
|
||||||
|
watches watchMap // Map of watches (key: i-number)
|
||||||
|
input chan *input // Inputs to the reader are sent on this channel
|
||||||
|
quit chan chan<- error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||||
|
func NewWatcher() (*Watcher, error) {
|
||||||
|
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
w := &Watcher{
|
||||||
|
port: port,
|
||||||
|
watches: make(watchMap),
|
||||||
|
input: make(chan *input, 1),
|
||||||
|
Events: make(chan Event, 50),
|
||||||
|
Errors: make(chan error),
|
||||||
|
quit: make(chan chan<- error, 1),
|
||||||
|
}
|
||||||
|
go w.readEvents()
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close removes all watches and closes the events channel.
|
||||||
|
func (w *Watcher) Close() error {
|
||||||
|
if w.isClosed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.isClosed = true
|
||||||
|
|
||||||
|
// Send "quit" message to the reader goroutine
|
||||||
|
ch := make(chan error)
|
||||||
|
w.quit <- ch
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add starts watching the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Add(name string) error {
|
||||||
|
if w.isClosed {
|
||||||
|
return errors.New("watcher already closed")
|
||||||
|
}
|
||||||
|
in := &input{
|
||||||
|
op: opAddWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
flags: sys_FS_ALL_EVENTS,
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove stops watching the the named file or directory (non-recursively).
|
||||||
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
in := &input{
|
||||||
|
op: opRemoveWatch,
|
||||||
|
path: filepath.Clean(name),
|
||||||
|
reply: make(chan error),
|
||||||
|
}
|
||||||
|
w.input <- in
|
||||||
|
if err := w.wakeupReader(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-in.reply
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Options for AddWatch
|
||||||
|
sys_FS_ONESHOT = 0x80000000
|
||||||
|
sys_FS_ONLYDIR = 0x1000000
|
||||||
|
|
||||||
|
// Events
|
||||||
|
sys_FS_ACCESS = 0x1
|
||||||
|
sys_FS_ALL_EVENTS = 0xfff
|
||||||
|
sys_FS_ATTRIB = 0x4
|
||||||
|
sys_FS_CLOSE = 0x18
|
||||||
|
sys_FS_CREATE = 0x100
|
||||||
|
sys_FS_DELETE = 0x200
|
||||||
|
sys_FS_DELETE_SELF = 0x400
|
||||||
|
sys_FS_MODIFY = 0x2
|
||||||
|
sys_FS_MOVE = 0xc0
|
||||||
|
sys_FS_MOVED_FROM = 0x40
|
||||||
|
sys_FS_MOVED_TO = 0x80
|
||||||
|
sys_FS_MOVE_SELF = 0x800
|
||||||
|
|
||||||
|
// Special events
|
||||||
|
sys_FS_IGNORED = 0x8000
|
||||||
|
sys_FS_Q_OVERFLOW = 0x4000
|
||||||
|
)
|
||||||
|
|
||||||
|
func newEvent(name string, mask uint32) Event {
|
||||||
|
e := Event{Name: name}
|
||||||
|
if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
|
||||||
|
e.Op |= Create
|
||||||
|
}
|
||||||
|
if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
|
||||||
|
e.Op |= Remove
|
||||||
|
}
|
||||||
|
if mask&sys_FS_MODIFY == sys_FS_MODIFY {
|
||||||
|
e.Op |= Write
|
||||||
|
}
|
||||||
|
if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
|
||||||
|
e.Op |= Rename
|
||||||
|
}
|
||||||
|
if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
|
||||||
|
e.Op |= Chmod
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
opAddWatch = iota
|
||||||
|
opRemoveWatch
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
provisional uint64 = 1 << (32 + iota)
|
||||||
|
)
|
||||||
|
|
||||||
|
type input struct {
|
||||||
|
op int
|
||||||
|
path string
|
||||||
|
flags uint32
|
||||||
|
reply chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
type inode struct {
|
||||||
|
handle syscall.Handle
|
||||||
|
volume uint32
|
||||||
|
index uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type watch struct {
|
||||||
|
ov syscall.Overlapped
|
||||||
|
ino *inode // i-number
|
||||||
|
path string // Directory path
|
||||||
|
mask uint64 // Directory itself is being watched with these notify flags
|
||||||
|
names map[string]uint64 // Map of names being watched and their notify flags
|
||||||
|
rename string // Remembers the old name while renaming a file
|
||||||
|
buf [4096]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type indexMap map[uint64]*watch
|
||||||
|
type watchMap map[uint32]indexMap
|
||||||
|
|
||||||
|
func (w *Watcher) wakeupReader() error {
|
||||||
|
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||||
|
if e != nil {
|
||||||
|
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDir(pathname string) (dir string, err error) {
|
||||||
|
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||||
|
if e != nil {
|
||||||
|
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||||
|
}
|
||||||
|
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||||
|
dir = pathname
|
||||||
|
} else {
|
||||||
|
dir, _ = filepath.Split(pathname)
|
||||||
|
dir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getIno(path string) (ino *inode, err error) {
|
||||||
|
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||||
|
syscall.FILE_LIST_DIRECTORY,
|
||||||
|
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||||
|
nil, syscall.OPEN_EXISTING,
|
||||||
|
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||||
|
if e != nil {
|
||||||
|
return nil, os.NewSyscallError("CreateFile", e)
|
||||||
|
}
|
||||||
|
var fi syscall.ByHandleFileInformation
|
||||||
|
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||||
|
syscall.CloseHandle(h)
|
||||||
|
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||||
|
}
|
||||||
|
ino = &inode{
|
||||||
|
handle: h,
|
||||||
|
volume: fi.VolumeSerialNumber,
|
||||||
|
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||||
|
}
|
||||||
|
return ino, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) get(ino *inode) *watch {
|
||||||
|
if i := m[ino.volume]; i != nil {
|
||||||
|
return i[ino.index]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (m watchMap) set(ino *inode, watch *watch) {
|
||||||
|
i := m[ino.volume]
|
||||||
|
if i == nil {
|
||||||
|
i = make(indexMap)
|
||||||
|
m[ino.volume] = i
|
||||||
|
}
|
||||||
|
i[ino.index] = watch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watchEntry := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watchEntry == nil {
|
||||||
|
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||||
|
}
|
||||||
|
watchEntry = &watch{
|
||||||
|
ino: ino,
|
||||||
|
path: dir,
|
||||||
|
names: make(map[string]uint64),
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
w.watches.set(ino, watchEntry)
|
||||||
|
w.mu.Unlock()
|
||||||
|
flags |= provisional
|
||||||
|
} else {
|
||||||
|
syscall.CloseHandle(ino.handle)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask |= flags
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||||
|
}
|
||||||
|
if err = w.startRead(watchEntry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
watchEntry.mask &= ^provisional
|
||||||
|
} else {
|
||||||
|
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) remWatch(pathname string) error {
|
||||||
|
dir, err := getDir(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ino, err := getIno(dir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
watch := w.watches.get(ino)
|
||||||
|
w.mu.Unlock()
|
||||||
|
if watch == nil {
|
||||||
|
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||||
|
}
|
||||||
|
if pathname == dir {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||||
|
watch.mask = 0
|
||||||
|
} else {
|
||||||
|
name := filepath.Base(pathname)
|
||||||
|
w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
return w.startRead(watch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) deleteWatch(watch *watch) {
|
||||||
|
for name, mask := range watch.names {
|
||||||
|
if mask&provisional == 0 {
|
||||||
|
w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
|
||||||
|
}
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if watch.mask != 0 {
|
||||||
|
if watch.mask&provisional == 0 {
|
||||||
|
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||||
|
}
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must run within the I/O thread.
|
||||||
|
func (w *Watcher) startRead(watch *watch) error {
|
||||||
|
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
}
|
||||||
|
mask := toWindowsFlags(watch.mask)
|
||||||
|
for _, m := range watch.names {
|
||||||
|
mask |= toWindowsFlags(m)
|
||||||
|
}
|
||||||
|
if mask == 0 {
|
||||||
|
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||||
|
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
w.mu.Lock()
|
||||||
|
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||||
|
w.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||||
|
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||||
|
if e != nil {
|
||||||
|
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||||
|
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||||
|
// Watched directory was probably removed
|
||||||
|
if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
|
||||||
|
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readEvents reads from the I/O completion port, converts the
|
||||||
|
// received events into Event objects and sends them via the Events channel.
|
||||||
|
// Entry point to the I/O thread.
|
||||||
|
func (w *Watcher) readEvents() {
|
||||||
|
var (
|
||||||
|
n, key uint32
|
||||||
|
ov *syscall.Overlapped
|
||||||
|
)
|
||||||
|
runtime.LockOSThread()
|
||||||
|
|
||||||
|
for {
|
||||||
|
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||||
|
watch := (*watch)(unsafe.Pointer(ov))
|
||||||
|
|
||||||
|
if watch == nil {
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.mu.Lock()
|
||||||
|
var indexes []indexMap
|
||||||
|
for _, index := range w.watches {
|
||||||
|
indexes = append(indexes, index)
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
|
for _, index := range indexes {
|
||||||
|
for _, watch := range index {
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if e := syscall.CloseHandle(w.port); e != nil {
|
||||||
|
err = os.NewSyscallError("CloseHandle", e)
|
||||||
|
}
|
||||||
|
close(w.Events)
|
||||||
|
close(w.Errors)
|
||||||
|
ch <- err
|
||||||
|
return
|
||||||
|
case in := <-w.input:
|
||||||
|
switch in.op {
|
||||||
|
case opAddWatch:
|
||||||
|
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||||
|
case opRemoveWatch:
|
||||||
|
in.reply <- w.remWatch(in.path)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch e {
|
||||||
|
case syscall.ERROR_MORE_DATA:
|
||||||
|
if watch == nil {
|
||||||
|
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||||
|
} else {
|
||||||
|
// The i/o succeeded but the buffer is full.
|
||||||
|
// In theory we should be building up a full packet.
|
||||||
|
// In practice we can get away with just carrying on.
|
||||||
|
n = uint32(unsafe.Sizeof(watch.buf))
|
||||||
|
}
|
||||||
|
case syscall.ERROR_ACCESS_DENIED:
|
||||||
|
// Watched directory was probably removed
|
||||||
|
w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
|
||||||
|
w.deleteWatch(watch)
|
||||||
|
w.startRead(watch)
|
||||||
|
continue
|
||||||
|
case syscall.ERROR_OPERATION_ABORTED:
|
||||||
|
// CancelIo was called on this handle
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||||
|
continue
|
||||||
|
case nil:
|
||||||
|
}
|
||||||
|
|
||||||
|
var offset uint32
|
||||||
|
for {
|
||||||
|
if n == 0 {
|
||||||
|
w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
|
||||||
|
w.Errors <- errors.New("short read in readEvents()")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Point "raw" to the event in the buffer
|
||||||
|
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||||
|
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||||
|
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||||
|
fullname := watch.path + "\\" + name
|
||||||
|
|
||||||
|
var mask uint64
|
||||||
|
switch raw.Action {
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
mask = sys_FS_DELETE_SELF
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
mask = sys_FS_MODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
watch.rename = name
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
if watch.names[watch.rename] != 0 {
|
||||||
|
watch.names[name] |= watch.names[watch.rename]
|
||||||
|
delete(watch.names, watch.rename)
|
||||||
|
mask = sys_FS_MOVE_SELF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sendNameEvent := func() {
|
||||||
|
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||||
|
if watch.names[name]&sys_FS_ONESHOT != 0 {
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||||
|
w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
|
||||||
|
delete(watch.names, name)
|
||||||
|
}
|
||||||
|
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||||
|
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||||
|
watch.mask = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||||
|
fullname = watch.path + "\\" + watch.rename
|
||||||
|
sendNameEvent()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next event in the buffer
|
||||||
|
if raw.NextEntryOffset == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset += raw.NextEntryOffset
|
||||||
|
|
||||||
|
// Error!
|
||||||
|
if offset >= n {
|
||||||
|
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.startRead(watch); err != nil {
|
||||||
|
w.Errors <- err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||||
|
if mask == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
event := newEvent(name, uint32(mask))
|
||||||
|
select {
|
||||||
|
case ch := <-w.quit:
|
||||||
|
w.quit <- ch
|
||||||
|
case w.Events <- event:
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func toWindowsFlags(mask uint64) uint32 {
|
||||||
|
var m uint32
|
||||||
|
if mask&sys_FS_ACCESS != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||||
|
}
|
||||||
|
if mask&sys_FS_MODIFY != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||||
|
}
|
||||||
|
if mask&sys_FS_ATTRIB != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||||
|
}
|
||||||
|
if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
|
||||||
|
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func toFSnotifyFlags(action uint32) uint64 {
|
||||||
|
switch action {
|
||||||
|
case syscall.FILE_ACTION_ADDED:
|
||||||
|
return sys_FS_CREATE
|
||||||
|
case syscall.FILE_ACTION_REMOVED:
|
||||||
|
return sys_FS_DELETE
|
||||||
|
case syscall.FILE_ACTION_MODIFIED:
|
||||||
|
return sys_FS_MODIFY
|
||||||
|
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||||
|
return sys_FS_MOVED_FROM
|
||||||
|
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||||
|
return sys_FS_MOVED_TO
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
188
Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
generated
vendored
Normal file
188
Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,188 @@
|
||||||
|
|
||||||
|
Copyright (c) 2011-2014 - Canonical Inc.
|
||||||
|
|
||||||
|
This software is licensed under the LGPLv3, included below.
|
||||||
|
|
||||||
|
As a special exception to the GNU Lesser General Public License version 3
|
||||||
|
("LGPL3"), the copyright holders of this Library give you permission to
|
||||||
|
convey to a third party a Combined Work that links statically or dynamically
|
||||||
|
to this Library without providing any Minimal Corresponding Source or
|
||||||
|
Minimal Application Code as set out in 4d or providing the installation
|
||||||
|
information set out in section 4e, provided that you comply with the other
|
||||||
|
provisions of LGPL3 and provided that you meet, for the Application the
|
||||||
|
terms and conditions of the license(s) which apply to the Application.
|
||||||
|
|
||||||
|
Except as stated in this special exception, the provisions of LGPL3 will
|
||||||
|
continue to comply in full to this Library. If you modify this Library, you
|
||||||
|
may apply this exception to your version of this Library, but you are not
|
||||||
|
obliged to do so. If you do not wish to do so, delete this exception
|
||||||
|
statement from your version. This exception does not (and cannot) modify any
|
||||||
|
license terms which apply to the Application, with which you must still
|
||||||
|
comply.
|
||||||
|
|
||||||
|
|
||||||
|
GNU LESSER GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
|
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
|
||||||
|
This version of the GNU Lesser General Public License incorporates
|
||||||
|
the terms and conditions of version 3 of the GNU General Public
|
||||||
|
License, supplemented by the additional permissions listed below.
|
||||||
|
|
||||||
|
0. Additional Definitions.
|
||||||
|
|
||||||
|
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||||
|
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||||
|
General Public License.
|
||||||
|
|
||||||
|
"The Library" refers to a covered work governed by this License,
|
||||||
|
other than an Application or a Combined Work as defined below.
|
||||||
|
|
||||||
|
An "Application" is any work that makes use of an interface provided
|
||||||
|
by the Library, but which is not otherwise based on the Library.
|
||||||
|
Defining a subclass of a class defined by the Library is deemed a mode
|
||||||
|
of using an interface provided by the Library.
|
||||||
|
|
||||||
|
A "Combined Work" is a work produced by combining or linking an
|
||||||
|
Application with the Library. The particular version of the Library
|
||||||
|
with which the Combined Work was made is also called the "Linked
|
||||||
|
Version".
|
||||||
|
|
||||||
|
The "Minimal Corresponding Source" for a Combined Work means the
|
||||||
|
Corresponding Source for the Combined Work, excluding any source code
|
||||||
|
for portions of the Combined Work that, considered in isolation, are
|
||||||
|
based on the Application, and not on the Linked Version.
|
||||||
|
|
||||||
|
The "Corresponding Application Code" for a Combined Work means the
|
||||||
|
object code and/or source code for the Application, including any data
|
||||||
|
and utility programs needed for reproducing the Combined Work from the
|
||||||
|
Application, but excluding the System Libraries of the Combined Work.
|
||||||
|
|
||||||
|
1. Exception to Section 3 of the GNU GPL.
|
||||||
|
|
||||||
|
You may convey a covered work under sections 3 and 4 of this License
|
||||||
|
without being bound by section 3 of the GNU GPL.
|
||||||
|
|
||||||
|
2. Conveying Modified Versions.
|
||||||
|
|
||||||
|
If you modify a copy of the Library, and, in your modifications, a
|
||||||
|
facility refers to a function or data to be supplied by an Application
|
||||||
|
that uses the facility (other than as an argument passed when the
|
||||||
|
facility is invoked), then you may convey a copy of the modified
|
||||||
|
version:
|
||||||
|
|
||||||
|
a) under this License, provided that you make a good faith effort to
|
||||||
|
ensure that, in the event an Application does not supply the
|
||||||
|
function or data, the facility still operates, and performs
|
||||||
|
whatever part of its purpose remains meaningful, or
|
||||||
|
|
||||||
|
b) under the GNU GPL, with none of the additional permissions of
|
||||||
|
this License applicable to that copy.
|
||||||
|
|
||||||
|
3. Object Code Incorporating Material from Library Header Files.
|
||||||
|
|
||||||
|
The object code form of an Application may incorporate material from
|
||||||
|
a header file that is part of the Library. You may convey such object
|
||||||
|
code under terms of your choice, provided that, if the incorporated
|
||||||
|
material is not limited to numerical parameters, data structure
|
||||||
|
layouts and accessors, or small macros, inline functions and templates
|
||||||
|
(ten or fewer lines in length), you do both of the following:
|
||||||
|
|
||||||
|
a) Give prominent notice with each copy of the object code that the
|
||||||
|
Library is used in it and that the Library and its use are
|
||||||
|
covered by this License.
|
||||||
|
|
||||||
|
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||||
|
document.
|
||||||
|
|
||||||
|
4. Combined Works.
|
||||||
|
|
||||||
|
You may convey a Combined Work under terms of your choice that,
|
||||||
|
taken together, effectively do not restrict modification of the
|
||||||
|
portions of the Library contained in the Combined Work and reverse
|
||||||
|
engineering for debugging such modifications, if you also do each of
|
||||||
|
the following:
|
||||||
|
|
||||||
|
a) Give prominent notice with each copy of the Combined Work that
|
||||||
|
the Library is used in it and that the Library and its use are
|
||||||
|
covered by this License.
|
||||||
|
|
||||||
|
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||||
|
document.
|
||||||
|
|
||||||
|
c) For a Combined Work that displays copyright notices during
|
||||||
|
execution, include the copyright notice for the Library among
|
||||||
|
these notices, as well as a reference directing the user to the
|
||||||
|
copies of the GNU GPL and this license document.
|
||||||
|
|
||||||
|
d) Do one of the following:
|
||||||
|
|
||||||
|
0) Convey the Minimal Corresponding Source under the terms of this
|
||||||
|
License, and the Corresponding Application Code in a form
|
||||||
|
suitable for, and under terms that permit, the user to
|
||||||
|
recombine or relink the Application with a modified version of
|
||||||
|
the Linked Version to produce a modified Combined Work, in the
|
||||||
|
manner specified by section 6 of the GNU GPL for conveying
|
||||||
|
Corresponding Source.
|
||||||
|
|
||||||
|
1) Use a suitable shared library mechanism for linking with the
|
||||||
|
Library. A suitable mechanism is one that (a) uses at run time
|
||||||
|
a copy of the Library already present on the user's computer
|
||||||
|
system, and (b) will operate properly with a modified version
|
||||||
|
of the Library that is interface-compatible with the Linked
|
||||||
|
Version.
|
||||||
|
|
||||||
|
e) Provide Installation Information, but only if you would otherwise
|
||||||
|
be required to provide such information under section 6 of the
|
||||||
|
GNU GPL, and only to the extent that such information is
|
||||||
|
necessary to install and execute a modified version of the
|
||||||
|
Combined Work produced by recombining or relinking the
|
||||||
|
Application with a modified version of the Linked Version. (If
|
||||||
|
you use option 4d0, the Installation Information must accompany
|
||||||
|
the Minimal Corresponding Source and Corresponding Application
|
||||||
|
Code. If you use option 4d1, you must provide the Installation
|
||||||
|
Information in the manner specified by section 6 of the GNU GPL
|
||||||
|
for conveying Corresponding Source.)
|
||||||
|
|
||||||
|
5. Combined Libraries.
|
||||||
|
|
||||||
|
You may place library facilities that are a work based on the
|
||||||
|
Library side by side in a single library together with other library
|
||||||
|
facilities that are not Applications and are not covered by this
|
||||||
|
License, and convey such a combined library under terms of your
|
||||||
|
choice, if you do both of the following:
|
||||||
|
|
||||||
|
a) Accompany the combined library with a copy of the same work based
|
||||||
|
on the Library, uncombined with any other library facilities,
|
||||||
|
conveyed under the terms of this License.
|
||||||
|
|
||||||
|
b) Give prominent notice with the combined library that part of it
|
||||||
|
is a work based on the Library, and explaining where to find the
|
||||||
|
accompanying uncombined form of the same work.
|
||||||
|
|
||||||
|
6. Revised Versions of the GNU Lesser General Public License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions
|
||||||
|
of the GNU Lesser General Public License from time to time. Such new
|
||||||
|
versions will be similar in spirit to the present version, but may
|
||||||
|
differ in detail to address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Library as you received it specifies that a certain numbered version
|
||||||
|
of the GNU Lesser General Public License "or any later version"
|
||||||
|
applies to it, you have the option of following the terms and
|
||||||
|
conditions either of that published version or of any later version
|
||||||
|
published by the Free Software Foundation. If the Library as you
|
||||||
|
received it does not specify a version number of the GNU Lesser
|
||||||
|
General Public License, you may choose any version of the GNU Lesser
|
||||||
|
General Public License ever published by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Library as you received it specifies that a proxy can decide
|
||||||
|
whether future versions of the GNU Lesser General Public License shall
|
||||||
|
apply, that proxy's public statement of acceptance of any version is
|
||||||
|
permanent authorization for you to choose that version for the
|
||||||
|
Library.
|
31
Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
generated
vendored
Normal file
31
Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
The following files were ported to Go from C files of libyaml, and thus
|
||||||
|
are still covered by their original copyright and license:
|
||||||
|
|
||||||
|
apic.go
|
||||||
|
emitterc.go
|
||||||
|
parserc.go
|
||||||
|
readerc.go
|
||||||
|
scannerc.go
|
||||||
|
writerc.go
|
||||||
|
yamlh.go
|
||||||
|
yamlprivateh.go
|
||||||
|
|
||||||
|
Copyright (c) 2006 Kirill Simonov
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||||
|
of the Software, and to permit persons to whom the Software is furnished to do
|
||||||
|
so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
128
Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
generated
vendored
Normal file
128
Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
generated
vendored
Normal file
|
@ -0,0 +1,128 @@
|
||||||
|
# YAML support for the Go language
|
||||||
|
|
||||||
|
Introduction
|
||||||
|
------------
|
||||||
|
|
||||||
|
The yaml package enables Go programs to comfortably encode and decode YAML
|
||||||
|
values. It was developed within [Canonical](https://www.canonical.com) as
|
||||||
|
part of the [juju](https://juju.ubuntu.com) project, and is based on a
|
||||||
|
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
|
||||||
|
C library to parse and generate YAML data quickly and reliably.
|
||||||
|
|
||||||
|
Compatibility
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The yaml package supports most of YAML 1.1 and 1.2, including support for
|
||||||
|
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
|
||||||
|
implemented, and base-60 floats from YAML 1.1 are purposefully not
|
||||||
|
supported since they're a poor design and are gone in YAML 1.2.
|
||||||
|
|
||||||
|
Installation and usage
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
The import path for the package is *gopkg.in/yaml.v2*.
|
||||||
|
|
||||||
|
To install it, run:
|
||||||
|
|
||||||
|
go get gopkg.in/yaml.v2
|
||||||
|
|
||||||
|
API documentation
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
If opened in a browser, the import path itself leads to the API documentation:
|
||||||
|
|
||||||
|
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
|
||||||
|
|
||||||
|
API stability
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
|
||||||
|
|
||||||
|
|
||||||
|
License
|
||||||
|
-------
|
||||||
|
|
||||||
|
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
|
||||||
|
|
||||||
|
|
||||||
|
Example
|
||||||
|
-------
|
||||||
|
|
||||||
|
```Go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var data = `
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d: [3, 4]
|
||||||
|
`
|
||||||
|
|
||||||
|
type T struct {
|
||||||
|
A string
|
||||||
|
B struct{C int; D []int ",flow"}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
t := T{}
|
||||||
|
|
||||||
|
err := yaml.Unmarshal([]byte(data), &t)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- t:\n%v\n\n", t)
|
||||||
|
|
||||||
|
d, err := yaml.Marshal(&t)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- t dump:\n%s\n\n", string(d))
|
||||||
|
|
||||||
|
m := make(map[interface{}]interface{})
|
||||||
|
|
||||||
|
err = yaml.Unmarshal([]byte(data), &m)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- m:\n%v\n\n", m)
|
||||||
|
|
||||||
|
d, err = yaml.Marshal(&m)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("--- m dump:\n%s\n\n", string(d))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This example will generate the following output:
|
||||||
|
|
||||||
|
```
|
||||||
|
--- t:
|
||||||
|
{Easy! {2 [3 4]}}
|
||||||
|
|
||||||
|
--- t dump:
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d: [3, 4]
|
||||||
|
|
||||||
|
|
||||||
|
--- m:
|
||||||
|
map[a:Easy! b:map[c:2 d:[3 4]]]
|
||||||
|
|
||||||
|
--- m dump:
|
||||||
|
a: Easy!
|
||||||
|
b:
|
||||||
|
c: 2
|
||||||
|
d:
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
```
|
||||||
|
|
742
Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
generated
vendored
Normal file
742
Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
generated
vendored
Normal file
|
@ -0,0 +1,742 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
||||||
|
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
||||||
|
|
||||||
|
// Check if we can move the queue at the beginning of the buffer.
|
||||||
|
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
||||||
|
if parser.tokens_head != len(parser.tokens) {
|
||||||
|
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
||||||
|
}
|
||||||
|
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
||||||
|
parser.tokens_head = 0
|
||||||
|
}
|
||||||
|
parser.tokens = append(parser.tokens, *token)
|
||||||
|
if pos < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
||||||
|
parser.tokens[parser.tokens_head+pos] = *token
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new parser object.
|
||||||
|
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||||||
|
*parser = yaml_parser_t{
|
||||||
|
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||||||
|
buffer: make([]byte, 0, input_buffer_size),
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy a parser object.
|
||||||
|
func yaml_parser_delete(parser *yaml_parser_t) {
|
||||||
|
*parser = yaml_parser_t{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String read handler.
|
||||||
|
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||||
|
if parser.input_pos == len(parser.input) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n = copy(buffer, parser.input[parser.input_pos:])
|
||||||
|
parser.input_pos += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// File read handler.
|
||||||
|
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||||
|
return parser.input_file.Read(buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a string input.
|
||||||
|
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||||||
|
if parser.read_handler != nil {
|
||||||
|
panic("must set the input source only once")
|
||||||
|
}
|
||||||
|
parser.read_handler = yaml_string_read_handler
|
||||||
|
parser.input = input
|
||||||
|
parser.input_pos = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a file input.
|
||||||
|
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
|
||||||
|
if parser.read_handler != nil {
|
||||||
|
panic("must set the input source only once")
|
||||||
|
}
|
||||||
|
parser.read_handler = yaml_file_read_handler
|
||||||
|
parser.input_file = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the source encoding.
|
||||||
|
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||||||
|
if parser.encoding != yaml_ANY_ENCODING {
|
||||||
|
panic("must set the encoding only once")
|
||||||
|
}
|
||||||
|
parser.encoding = encoding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new emitter object.
|
||||||
|
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
|
||||||
|
*emitter = yaml_emitter_t{
|
||||||
|
buffer: make([]byte, output_buffer_size),
|
||||||
|
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||||||
|
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||||||
|
events: make([]yaml_event_t, 0, initial_queue_size),
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy an emitter object.
|
||||||
|
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||||||
|
*emitter = yaml_emitter_t{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String write handler.
|
||||||
|
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||||
|
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// File write handler.
|
||||||
|
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||||
|
_, err := emitter.output_file.Write(buffer)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a string output.
|
||||||
|
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||||||
|
if emitter.write_handler != nil {
|
||||||
|
panic("must set the output target only once")
|
||||||
|
}
|
||||||
|
emitter.write_handler = yaml_string_write_handler
|
||||||
|
emitter.output_buffer = output_buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a file output.
|
||||||
|
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
|
||||||
|
if emitter.write_handler != nil {
|
||||||
|
panic("must set the output target only once")
|
||||||
|
}
|
||||||
|
emitter.write_handler = yaml_file_write_handler
|
||||||
|
emitter.output_file = file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the output encoding.
|
||||||
|
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||||||
|
if emitter.encoding != yaml_ANY_ENCODING {
|
||||||
|
panic("must set the output encoding only once")
|
||||||
|
}
|
||||||
|
emitter.encoding = encoding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the canonical output style.
|
||||||
|
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
||||||
|
emitter.canonical = canonical
|
||||||
|
}
|
||||||
|
|
||||||
|
//// Set the indentation increment.
|
||||||
|
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||||||
|
if indent < 2 || indent > 9 {
|
||||||
|
indent = 2
|
||||||
|
}
|
||||||
|
emitter.best_indent = indent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the preferred line width.
|
||||||
|
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||||||
|
if width < 0 {
|
||||||
|
width = -1
|
||||||
|
}
|
||||||
|
emitter.best_width = width
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set if unescaped non-ASCII characters are allowed.
|
||||||
|
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
||||||
|
emitter.unicode = unicode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the preferred line break character.
|
||||||
|
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
||||||
|
emitter.line_break = line_break
|
||||||
|
}
|
||||||
|
|
||||||
|
///*
|
||||||
|
// * Destroy a token object.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(void)
|
||||||
|
//yaml_token_delete(yaml_token_t *token)
|
||||||
|
//{
|
||||||
|
// assert(token); // Non-NULL token object expected.
|
||||||
|
//
|
||||||
|
// switch (token.type)
|
||||||
|
// {
|
||||||
|
// case YAML_TAG_DIRECTIVE_TOKEN:
|
||||||
|
// yaml_free(token.data.tag_directive.handle);
|
||||||
|
// yaml_free(token.data.tag_directive.prefix);
|
||||||
|
// break;
|
||||||
|
//
|
||||||
|
// case YAML_ALIAS_TOKEN:
|
||||||
|
// yaml_free(token.data.alias.value);
|
||||||
|
// break;
|
||||||
|
//
|
||||||
|
// case YAML_ANCHOR_TOKEN:
|
||||||
|
// yaml_free(token.data.anchor.value);
|
||||||
|
// break;
|
||||||
|
//
|
||||||
|
// case YAML_TAG_TOKEN:
|
||||||
|
// yaml_free(token.data.tag.handle);
|
||||||
|
// yaml_free(token.data.tag.suffix);
|
||||||
|
// break;
|
||||||
|
//
|
||||||
|
// case YAML_SCALAR_TOKEN:
|
||||||
|
// yaml_free(token.data.scalar.value);
|
||||||
|
// break;
|
||||||
|
//
|
||||||
|
// default:
|
||||||
|
// break;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// memset(token, 0, sizeof(yaml_token_t));
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Check if a string is a valid UTF-8 sequence.
|
||||||
|
// *
|
||||||
|
// * Check 'reader.c' for more details on UTF-8 encoding.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//static int
|
||||||
|
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||||||
|
//{
|
||||||
|
// yaml_char_t *end = start+length;
|
||||||
|
// yaml_char_t *pointer = start;
|
||||||
|
//
|
||||||
|
// while (pointer < end) {
|
||||||
|
// unsigned char octet;
|
||||||
|
// unsigned int width;
|
||||||
|
// unsigned int value;
|
||||||
|
// size_t k;
|
||||||
|
//
|
||||||
|
// octet = pointer[0];
|
||||||
|
// width = (octet & 0x80) == 0x00 ? 1 :
|
||||||
|
// (octet & 0xE0) == 0xC0 ? 2 :
|
||||||
|
// (octet & 0xF0) == 0xE0 ? 3 :
|
||||||
|
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||||||
|
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||||||
|
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||||||
|
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||||||
|
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||||||
|
// if (!width) return 0;
|
||||||
|
// if (pointer+width > end) return 0;
|
||||||
|
// for (k = 1; k < width; k ++) {
|
||||||
|
// octet = pointer[k];
|
||||||
|
// if ((octet & 0xC0) != 0x80) return 0;
|
||||||
|
// value = (value << 6) + (octet & 0x3F);
|
||||||
|
// }
|
||||||
|
// if (!((width == 1) ||
|
||||||
|
// (width == 2 && value >= 0x80) ||
|
||||||
|
// (width == 3 && value >= 0x800) ||
|
||||||
|
// (width == 4 && value >= 0x10000))) return 0;
|
||||||
|
//
|
||||||
|
// pointer += width;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// return 1;
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
|
||||||
|
// Create STREAM-START.
|
||||||
|
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_STREAM_START_EVENT,
|
||||||
|
encoding: encoding,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create STREAM-END.
|
||||||
|
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_STREAM_END_EVENT,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create DOCUMENT-START.
|
||||||
|
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
|
||||||
|
tag_directives []yaml_tag_directive_t, implicit bool) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_DOCUMENT_START_EVENT,
|
||||||
|
version_directive: version_directive,
|
||||||
|
tag_directives: tag_directives,
|
||||||
|
implicit: implicit,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create DOCUMENT-END.
|
||||||
|
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_DOCUMENT_END_EVENT,
|
||||||
|
implicit: implicit,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
///*
|
||||||
|
// * Create ALIAS.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
|
||||||
|
//{
|
||||||
|
// mark yaml_mark_t = { 0, 0, 0 }
|
||||||
|
// anchor_copy *yaml_char_t = NULL
|
||||||
|
//
|
||||||
|
// assert(event) // Non-NULL event object is expected.
|
||||||
|
// assert(anchor) // Non-NULL anchor is expected.
|
||||||
|
//
|
||||||
|
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
|
||||||
|
//
|
||||||
|
// anchor_copy = yaml_strdup(anchor)
|
||||||
|
// if (!anchor_copy)
|
||||||
|
// return 0
|
||||||
|
//
|
||||||
|
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
|
||||||
|
//
|
||||||
|
// return 1
|
||||||
|
//}
|
||||||
|
|
||||||
|
// Create SCALAR.
|
||||||
|
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_SCALAR_EVENT,
|
||||||
|
anchor: anchor,
|
||||||
|
tag: tag,
|
||||||
|
value: value,
|
||||||
|
implicit: plain_implicit,
|
||||||
|
quoted_implicit: quoted_implicit,
|
||||||
|
style: yaml_style_t(style),
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create SEQUENCE-START.
|
||||||
|
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_SEQUENCE_START_EVENT,
|
||||||
|
anchor: anchor,
|
||||||
|
tag: tag,
|
||||||
|
implicit: implicit,
|
||||||
|
style: yaml_style_t(style),
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create SEQUENCE-END.
|
||||||
|
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_SEQUENCE_END_EVENT,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create MAPPING-START.
|
||||||
|
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_MAPPING_START_EVENT,
|
||||||
|
anchor: anchor,
|
||||||
|
tag: tag,
|
||||||
|
implicit: implicit,
|
||||||
|
style: yaml_style_t(style),
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create MAPPING-END.
|
||||||
|
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
|
||||||
|
*event = yaml_event_t{
|
||||||
|
typ: yaml_MAPPING_END_EVENT,
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy an event object.
|
||||||
|
func yaml_event_delete(event *yaml_event_t) {
|
||||||
|
*event = yaml_event_t{}
|
||||||
|
}
|
||||||
|
|
||||||
|
///*
|
||||||
|
// * Create a document object.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_initialize(document *yaml_document_t,
|
||||||
|
// version_directive *yaml_version_directive_t,
|
||||||
|
// tag_directives_start *yaml_tag_directive_t,
|
||||||
|
// tag_directives_end *yaml_tag_directive_t,
|
||||||
|
// start_implicit int, end_implicit int)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
// struct {
|
||||||
|
// start *yaml_node_t
|
||||||
|
// end *yaml_node_t
|
||||||
|
// top *yaml_node_t
|
||||||
|
// } nodes = { NULL, NULL, NULL }
|
||||||
|
// version_directive_copy *yaml_version_directive_t = NULL
|
||||||
|
// struct {
|
||||||
|
// start *yaml_tag_directive_t
|
||||||
|
// end *yaml_tag_directive_t
|
||||||
|
// top *yaml_tag_directive_t
|
||||||
|
// } tag_directives_copy = { NULL, NULL, NULL }
|
||||||
|
// value yaml_tag_directive_t = { NULL, NULL }
|
||||||
|
// mark yaml_mark_t = { 0, 0, 0 }
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
// assert((tag_directives_start && tag_directives_end) ||
|
||||||
|
// (tag_directives_start == tag_directives_end))
|
||||||
|
// // Valid tag directives are expected.
|
||||||
|
//
|
||||||
|
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||||||
|
//
|
||||||
|
// if (version_directive) {
|
||||||
|
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||||||
|
// if (!version_directive_copy) goto error
|
||||||
|
// version_directive_copy.major = version_directive.major
|
||||||
|
// version_directive_copy.minor = version_directive.minor
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if (tag_directives_start != tag_directives_end) {
|
||||||
|
// tag_directive *yaml_tag_directive_t
|
||||||
|
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||||||
|
// goto error
|
||||||
|
// for (tag_directive = tag_directives_start
|
||||||
|
// tag_directive != tag_directives_end; tag_directive ++) {
|
||||||
|
// assert(tag_directive.handle)
|
||||||
|
// assert(tag_directive.prefix)
|
||||||
|
// if (!yaml_check_utf8(tag_directive.handle,
|
||||||
|
// strlen((char *)tag_directive.handle)))
|
||||||
|
// goto error
|
||||||
|
// if (!yaml_check_utf8(tag_directive.prefix,
|
||||||
|
// strlen((char *)tag_directive.prefix)))
|
||||||
|
// goto error
|
||||||
|
// value.handle = yaml_strdup(tag_directive.handle)
|
||||||
|
// value.prefix = yaml_strdup(tag_directive.prefix)
|
||||||
|
// if (!value.handle || !value.prefix) goto error
|
||||||
|
// if (!PUSH(&context, tag_directives_copy, value))
|
||||||
|
// goto error
|
||||||
|
// value.handle = NULL
|
||||||
|
// value.prefix = NULL
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||||||
|
// tag_directives_copy.start, tag_directives_copy.top,
|
||||||
|
// start_implicit, end_implicit, mark, mark)
|
||||||
|
//
|
||||||
|
// return 1
|
||||||
|
//
|
||||||
|
//error:
|
||||||
|
// STACK_DEL(&context, nodes)
|
||||||
|
// yaml_free(version_directive_copy)
|
||||||
|
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||||||
|
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||||||
|
// yaml_free(value.handle)
|
||||||
|
// yaml_free(value.prefix)
|
||||||
|
// }
|
||||||
|
// STACK_DEL(&context, tag_directives_copy)
|
||||||
|
// yaml_free(value.handle)
|
||||||
|
// yaml_free(value.prefix)
|
||||||
|
//
|
||||||
|
// return 0
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Destroy a document object.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(void)
|
||||||
|
//yaml_document_delete(document *yaml_document_t)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
// tag_directive *yaml_tag_directive_t
|
||||||
|
//
|
||||||
|
// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
//
|
||||||
|
// while (!STACK_EMPTY(&context, document.nodes)) {
|
||||||
|
// node yaml_node_t = POP(&context, document.nodes)
|
||||||
|
// yaml_free(node.tag)
|
||||||
|
// switch (node.type) {
|
||||||
|
// case YAML_SCALAR_NODE:
|
||||||
|
// yaml_free(node.data.scalar.value)
|
||||||
|
// break
|
||||||
|
// case YAML_SEQUENCE_NODE:
|
||||||
|
// STACK_DEL(&context, node.data.sequence.items)
|
||||||
|
// break
|
||||||
|
// case YAML_MAPPING_NODE:
|
||||||
|
// STACK_DEL(&context, node.data.mapping.pairs)
|
||||||
|
// break
|
||||||
|
// default:
|
||||||
|
// assert(0) // Should not happen.
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// STACK_DEL(&context, document.nodes)
|
||||||
|
//
|
||||||
|
// yaml_free(document.version_directive)
|
||||||
|
// for (tag_directive = document.tag_directives.start
|
||||||
|
// tag_directive != document.tag_directives.end
|
||||||
|
// tag_directive++) {
|
||||||
|
// yaml_free(tag_directive.handle)
|
||||||
|
// yaml_free(tag_directive.prefix)
|
||||||
|
// }
|
||||||
|
// yaml_free(document.tag_directives.start)
|
||||||
|
//
|
||||||
|
// memset(document, 0, sizeof(yaml_document_t))
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///**
|
||||||
|
// * Get a document node.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(yaml_node_t *)
|
||||||
|
//yaml_document_get_node(document *yaml_document_t, index int)
|
||||||
|
//{
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
//
|
||||||
|
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||||||
|
// return document.nodes.start + index - 1
|
||||||
|
// }
|
||||||
|
// return NULL
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///**
|
||||||
|
// * Get the root object.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(yaml_node_t *)
|
||||||
|
//yaml_document_get_root_node(document *yaml_document_t)
|
||||||
|
//{
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
//
|
||||||
|
// if (document.nodes.top != document.nodes.start) {
|
||||||
|
// return document.nodes.start
|
||||||
|
// }
|
||||||
|
// return NULL
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Add a scalar node to a document.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_add_scalar(document *yaml_document_t,
|
||||||
|
// tag *yaml_char_t, value *yaml_char_t, length int,
|
||||||
|
// style yaml_scalar_style_t)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
// mark yaml_mark_t = { 0, 0, 0 }
|
||||||
|
// tag_copy *yaml_char_t = NULL
|
||||||
|
// value_copy *yaml_char_t = NULL
|
||||||
|
// node yaml_node_t
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
// assert(value) // Non-NULL value is expected.
|
||||||
|
//
|
||||||
|
// if (!tag) {
|
||||||
|
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||||
|
// tag_copy = yaml_strdup(tag)
|
||||||
|
// if (!tag_copy) goto error
|
||||||
|
//
|
||||||
|
// if (length < 0) {
|
||||||
|
// length = strlen((char *)value)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if (!yaml_check_utf8(value, length)) goto error
|
||||||
|
// value_copy = yaml_malloc(length+1)
|
||||||
|
// if (!value_copy) goto error
|
||||||
|
// memcpy(value_copy, value, length)
|
||||||
|
// value_copy[length] = '\0'
|
||||||
|
//
|
||||||
|
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||||||
|
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||||
|
//
|
||||||
|
// return document.nodes.top - document.nodes.start
|
||||||
|
//
|
||||||
|
//error:
|
||||||
|
// yaml_free(tag_copy)
|
||||||
|
// yaml_free(value_copy)
|
||||||
|
//
|
||||||
|
// return 0
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Add a sequence node to a document.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_add_sequence(document *yaml_document_t,
|
||||||
|
// tag *yaml_char_t, style yaml_sequence_style_t)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
// mark yaml_mark_t = { 0, 0, 0 }
|
||||||
|
// tag_copy *yaml_char_t = NULL
|
||||||
|
// struct {
|
||||||
|
// start *yaml_node_item_t
|
||||||
|
// end *yaml_node_item_t
|
||||||
|
// top *yaml_node_item_t
|
||||||
|
// } items = { NULL, NULL, NULL }
|
||||||
|
// node yaml_node_t
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
//
|
||||||
|
// if (!tag) {
|
||||||
|
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||||
|
// tag_copy = yaml_strdup(tag)
|
||||||
|
// if (!tag_copy) goto error
|
||||||
|
//
|
||||||
|
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||||
|
//
|
||||||
|
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||||
|
// style, mark, mark)
|
||||||
|
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||||
|
//
|
||||||
|
// return document.nodes.top - document.nodes.start
|
||||||
|
//
|
||||||
|
//error:
|
||||||
|
// STACK_DEL(&context, items)
|
||||||
|
// yaml_free(tag_copy)
|
||||||
|
//
|
||||||
|
// return 0
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Add a mapping node to a document.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_add_mapping(document *yaml_document_t,
|
||||||
|
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
// mark yaml_mark_t = { 0, 0, 0 }
|
||||||
|
// tag_copy *yaml_char_t = NULL
|
||||||
|
// struct {
|
||||||
|
// start *yaml_node_pair_t
|
||||||
|
// end *yaml_node_pair_t
|
||||||
|
// top *yaml_node_pair_t
|
||||||
|
// } pairs = { NULL, NULL, NULL }
|
||||||
|
// node yaml_node_t
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document object is expected.
|
||||||
|
//
|
||||||
|
// if (!tag) {
|
||||||
|
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||||
|
// tag_copy = yaml_strdup(tag)
|
||||||
|
// if (!tag_copy) goto error
|
||||||
|
//
|
||||||
|
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||||
|
//
|
||||||
|
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||||
|
// style, mark, mark)
|
||||||
|
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||||
|
//
|
||||||
|
// return document.nodes.top - document.nodes.start
|
||||||
|
//
|
||||||
|
//error:
|
||||||
|
// STACK_DEL(&context, pairs)
|
||||||
|
// yaml_free(tag_copy)
|
||||||
|
//
|
||||||
|
// return 0
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Append an item to a sequence node.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||||
|
// sequence int, item int)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document is required.
|
||||||
|
// assert(sequence > 0
|
||||||
|
// && document.nodes.start + sequence <= document.nodes.top)
|
||||||
|
// // Valid sequence id is required.
|
||||||
|
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||||
|
// // A sequence node is required.
|
||||||
|
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||||
|
// // Valid item id is required.
|
||||||
|
//
|
||||||
|
// if (!PUSH(&context,
|
||||||
|
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||||
|
// return 0
|
||||||
|
//
|
||||||
|
// return 1
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
///*
|
||||||
|
// * Append a pair of a key and a value to a mapping node.
|
||||||
|
// */
|
||||||
|
//
|
||||||
|
//YAML_DECLARE(int)
|
||||||
|
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||||
|
// mapping int, key int, value int)
|
||||||
|
//{
|
||||||
|
// struct {
|
||||||
|
// error yaml_error_type_t
|
||||||
|
// } context
|
||||||
|
//
|
||||||
|
// pair yaml_node_pair_t
|
||||||
|
//
|
||||||
|
// assert(document) // Non-NULL document is required.
|
||||||
|
// assert(mapping > 0
|
||||||
|
// && document.nodes.start + mapping <= document.nodes.top)
|
||||||
|
// // Valid mapping id is required.
|
||||||
|
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||||
|
// // A mapping node is required.
|
||||||
|
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||||
|
// // Valid key id is required.
|
||||||
|
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||||
|
// // Valid value id is required.
|
||||||
|
//
|
||||||
|
// pair.key = key
|
||||||
|
// pair.value = value
|
||||||
|
//
|
||||||
|
// if (!PUSH(&context,
|
||||||
|
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||||
|
// return 0
|
||||||
|
//
|
||||||
|
// return 1
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//
|
683
Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
generated
vendored
Normal file
683
Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
generated
vendored
Normal file
|
@ -0,0 +1,683 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
documentNode = 1 << iota
|
||||||
|
mappingNode
|
||||||
|
sequenceNode
|
||||||
|
scalarNode
|
||||||
|
aliasNode
|
||||||
|
)
|
||||||
|
|
||||||
|
type node struct {
|
||||||
|
kind int
|
||||||
|
line, column int
|
||||||
|
tag string
|
||||||
|
value string
|
||||||
|
implicit bool
|
||||||
|
children []*node
|
||||||
|
anchors map[string]*node
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Parser, produces a node tree out of a libyaml event stream.
|
||||||
|
|
||||||
|
type parser struct {
|
||||||
|
parser yaml_parser_t
|
||||||
|
event yaml_event_t
|
||||||
|
doc *node
|
||||||
|
}
|
||||||
|
|
||||||
|
func newParser(b []byte) *parser {
|
||||||
|
p := parser{}
|
||||||
|
if !yaml_parser_initialize(&p.parser) {
|
||||||
|
panic("failed to initialize YAML emitter")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b) == 0 {
|
||||||
|
b = []byte{'\n'}
|
||||||
|
}
|
||||||
|
|
||||||
|
yaml_parser_set_input_string(&p.parser, b)
|
||||||
|
|
||||||
|
p.skip()
|
||||||
|
if p.event.typ != yaml_STREAM_START_EVENT {
|
||||||
|
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
|
||||||
|
}
|
||||||
|
p.skip()
|
||||||
|
return &p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) destroy() {
|
||||||
|
if p.event.typ != yaml_NO_EVENT {
|
||||||
|
yaml_event_delete(&p.event)
|
||||||
|
}
|
||||||
|
yaml_parser_delete(&p.parser)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) skip() {
|
||||||
|
if p.event.typ != yaml_NO_EVENT {
|
||||||
|
if p.event.typ == yaml_STREAM_END_EVENT {
|
||||||
|
failf("attempted to go past the end of stream; corrupted value?")
|
||||||
|
}
|
||||||
|
yaml_event_delete(&p.event)
|
||||||
|
}
|
||||||
|
if !yaml_parser_parse(&p.parser, &p.event) {
|
||||||
|
p.fail()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) fail() {
|
||||||
|
var where string
|
||||||
|
var line int
|
||||||
|
if p.parser.problem_mark.line != 0 {
|
||||||
|
line = p.parser.problem_mark.line
|
||||||
|
} else if p.parser.context_mark.line != 0 {
|
||||||
|
line = p.parser.context_mark.line
|
||||||
|
}
|
||||||
|
if line != 0 {
|
||||||
|
where = "line " + strconv.Itoa(line) + ": "
|
||||||
|
}
|
||||||
|
var msg string
|
||||||
|
if len(p.parser.problem) > 0 {
|
||||||
|
msg = p.parser.problem
|
||||||
|
} else {
|
||||||
|
msg = "unknown problem parsing YAML content"
|
||||||
|
}
|
||||||
|
failf("%s%s", where, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) anchor(n *node, anchor []byte) {
|
||||||
|
if anchor != nil {
|
||||||
|
p.doc.anchors[string(anchor)] = n
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) parse() *node {
|
||||||
|
switch p.event.typ {
|
||||||
|
case yaml_SCALAR_EVENT:
|
||||||
|
return p.scalar()
|
||||||
|
case yaml_ALIAS_EVENT:
|
||||||
|
return p.alias()
|
||||||
|
case yaml_MAPPING_START_EVENT:
|
||||||
|
return p.mapping()
|
||||||
|
case yaml_SEQUENCE_START_EVENT:
|
||||||
|
return p.sequence()
|
||||||
|
case yaml_DOCUMENT_START_EVENT:
|
||||||
|
return p.document()
|
||||||
|
case yaml_STREAM_END_EVENT:
|
||||||
|
// Happens when attempting to decode an empty buffer.
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) node(kind int) *node {
|
||||||
|
return &node{
|
||||||
|
kind: kind,
|
||||||
|
line: p.event.start_mark.line,
|
||||||
|
column: p.event.start_mark.column,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) document() *node {
|
||||||
|
n := p.node(documentNode)
|
||||||
|
n.anchors = make(map[string]*node)
|
||||||
|
p.doc = n
|
||||||
|
p.skip()
|
||||||
|
n.children = append(n.children, p.parse())
|
||||||
|
if p.event.typ != yaml_DOCUMENT_END_EVENT {
|
||||||
|
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
|
||||||
|
}
|
||||||
|
p.skip()
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) alias() *node {
|
||||||
|
n := p.node(aliasNode)
|
||||||
|
n.value = string(p.event.anchor)
|
||||||
|
p.skip()
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) scalar() *node {
|
||||||
|
n := p.node(scalarNode)
|
||||||
|
n.value = string(p.event.value)
|
||||||
|
n.tag = string(p.event.tag)
|
||||||
|
n.implicit = p.event.implicit
|
||||||
|
p.anchor(n, p.event.anchor)
|
||||||
|
p.skip()
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) sequence() *node {
|
||||||
|
n := p.node(sequenceNode)
|
||||||
|
p.anchor(n, p.event.anchor)
|
||||||
|
p.skip()
|
||||||
|
for p.event.typ != yaml_SEQUENCE_END_EVENT {
|
||||||
|
n.children = append(n.children, p.parse())
|
||||||
|
}
|
||||||
|
p.skip()
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) mapping() *node {
|
||||||
|
n := p.node(mappingNode)
|
||||||
|
p.anchor(n, p.event.anchor)
|
||||||
|
p.skip()
|
||||||
|
for p.event.typ != yaml_MAPPING_END_EVENT {
|
||||||
|
n.children = append(n.children, p.parse(), p.parse())
|
||||||
|
}
|
||||||
|
p.skip()
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Decoder, unmarshals a node into a provided value.
|
||||||
|
|
||||||
|
type decoder struct {
|
||||||
|
doc *node
|
||||||
|
aliases map[string]bool
|
||||||
|
mapType reflect.Type
|
||||||
|
terrors []string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
mapItemType = reflect.TypeOf(MapItem{})
|
||||||
|
durationType = reflect.TypeOf(time.Duration(0))
|
||||||
|
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
|
||||||
|
ifaceType = defaultMapType.Elem()
|
||||||
|
)
|
||||||
|
|
||||||
|
func newDecoder() *decoder {
|
||||||
|
d := &decoder{mapType: defaultMapType}
|
||||||
|
d.aliases = make(map[string]bool)
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
|
||||||
|
if n.tag != "" {
|
||||||
|
tag = n.tag
|
||||||
|
}
|
||||||
|
value := n.value
|
||||||
|
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
|
||||||
|
if len(value) > 10 {
|
||||||
|
value = " `" + value[:7] + "...`"
|
||||||
|
} else {
|
||||||
|
value = " `" + value + "`"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
|
||||||
|
terrlen := len(d.terrors)
|
||||||
|
err := u.UnmarshalYAML(func(v interface{}) (err error) {
|
||||||
|
defer handleErr(&err)
|
||||||
|
d.unmarshal(n, reflect.ValueOf(v))
|
||||||
|
if len(d.terrors) > terrlen {
|
||||||
|
issues := d.terrors[terrlen:]
|
||||||
|
d.terrors = d.terrors[:terrlen]
|
||||||
|
return &TypeError{issues}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if e, ok := err.(*TypeError); ok {
|
||||||
|
d.terrors = append(d.terrors, e.Errors...)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fail(err)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
||||||
|
// if a value is found to implement it.
|
||||||
|
// It returns the initialized and dereferenced out value, whether
|
||||||
|
// unmarshalling was already done by UnmarshalYAML, and if so whether
|
||||||
|
// its types unmarshalled appropriately.
|
||||||
|
//
|
||||||
|
// If n holds a null value, prepare returns before doing anything.
|
||||||
|
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
|
||||||
|
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
|
||||||
|
return out, false, false
|
||||||
|
}
|
||||||
|
again := true
|
||||||
|
for again {
|
||||||
|
again = false
|
||||||
|
if out.Kind() == reflect.Ptr {
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.New(out.Type().Elem()))
|
||||||
|
}
|
||||||
|
out = out.Elem()
|
||||||
|
again = true
|
||||||
|
}
|
||||||
|
if out.CanAddr() {
|
||||||
|
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
|
||||||
|
good = d.callUnmarshaler(n, u)
|
||||||
|
return out, true, good
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
|
||||||
|
switch n.kind {
|
||||||
|
case documentNode:
|
||||||
|
return d.document(n, out)
|
||||||
|
case aliasNode:
|
||||||
|
return d.alias(n, out)
|
||||||
|
}
|
||||||
|
out, unmarshaled, good := d.prepare(n, out)
|
||||||
|
if unmarshaled {
|
||||||
|
return good
|
||||||
|
}
|
||||||
|
switch n.kind {
|
||||||
|
case scalarNode:
|
||||||
|
good = d.scalar(n, out)
|
||||||
|
case mappingNode:
|
||||||
|
good = d.mapping(n, out)
|
||||||
|
case sequenceNode:
|
||||||
|
good = d.sequence(n, out)
|
||||||
|
default:
|
||||||
|
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
|
||||||
|
}
|
||||||
|
return good
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
||||||
|
if len(n.children) == 1 {
|
||||||
|
d.doc = n
|
||||||
|
d.unmarshal(n.children[0], out)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||||
|
an, ok := d.doc.anchors[n.value]
|
||||||
|
if !ok {
|
||||||
|
failf("unknown anchor '%s' referenced", n.value)
|
||||||
|
}
|
||||||
|
if d.aliases[n.value] {
|
||||||
|
failf("anchor '%s' value contains itself", n.value)
|
||||||
|
}
|
||||||
|
d.aliases[n.value] = true
|
||||||
|
good = d.unmarshal(an, out)
|
||||||
|
delete(d.aliases, n.value)
|
||||||
|
return good
|
||||||
|
}
|
||||||
|
|
||||||
|
var zeroValue reflect.Value
|
||||||
|
|
||||||
|
func resetMap(out reflect.Value) {
|
||||||
|
for _, k := range out.MapKeys() {
|
||||||
|
out.SetMapIndex(k, zeroValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
|
||||||
|
var tag string
|
||||||
|
var resolved interface{}
|
||||||
|
if n.tag == "" && !n.implicit {
|
||||||
|
tag = yaml_STR_TAG
|
||||||
|
resolved = n.value
|
||||||
|
} else {
|
||||||
|
tag, resolved = resolve(n.tag, n.value)
|
||||||
|
if tag == yaml_BINARY_TAG {
|
||||||
|
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
||||||
|
if err != nil {
|
||||||
|
failf("!!binary value contains invalid base64 data")
|
||||||
|
}
|
||||||
|
resolved = string(data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if resolved == nil {
|
||||||
|
if out.Kind() == reflect.Map && !out.CanAddr() {
|
||||||
|
resetMap(out)
|
||||||
|
} else {
|
||||||
|
out.Set(reflect.Zero(out.Type()))
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if s, ok := resolved.(string); ok && out.CanAddr() {
|
||||||
|
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
|
||||||
|
err := u.UnmarshalText([]byte(s))
|
||||||
|
if err != nil {
|
||||||
|
fail(err)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch out.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
if tag == yaml_BINARY_TAG {
|
||||||
|
out.SetString(resolved.(string))
|
||||||
|
good = true
|
||||||
|
} else if resolved != nil {
|
||||||
|
out.SetString(n.value)
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case reflect.Interface:
|
||||||
|
if resolved == nil {
|
||||||
|
out.Set(reflect.Zero(out.Type()))
|
||||||
|
} else {
|
||||||
|
out.Set(reflect.ValueOf(resolved))
|
||||||
|
}
|
||||||
|
good = true
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
switch resolved := resolved.(type) {
|
||||||
|
case int:
|
||||||
|
if !out.OverflowInt(int64(resolved)) {
|
||||||
|
out.SetInt(int64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case int64:
|
||||||
|
if !out.OverflowInt(resolved) {
|
||||||
|
out.SetInt(resolved)
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case uint64:
|
||||||
|
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||||
|
out.SetInt(int64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||||
|
out.SetInt(int64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case string:
|
||||||
|
if out.Type() == durationType {
|
||||||
|
d, err := time.ParseDuration(resolved)
|
||||||
|
if err == nil {
|
||||||
|
out.SetInt(int64(d))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
switch resolved := resolved.(type) {
|
||||||
|
case int:
|
||||||
|
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||||
|
out.SetUint(uint64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case int64:
|
||||||
|
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||||
|
out.SetUint(uint64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case uint64:
|
||||||
|
if !out.OverflowUint(uint64(resolved)) {
|
||||||
|
out.SetUint(uint64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
|
||||||
|
out.SetUint(uint64(resolved))
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Bool:
|
||||||
|
switch resolved := resolved.(type) {
|
||||||
|
case bool:
|
||||||
|
out.SetBool(resolved)
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
switch resolved := resolved.(type) {
|
||||||
|
case int:
|
||||||
|
out.SetFloat(float64(resolved))
|
||||||
|
good = true
|
||||||
|
case int64:
|
||||||
|
out.SetFloat(float64(resolved))
|
||||||
|
good = true
|
||||||
|
case uint64:
|
||||||
|
out.SetFloat(float64(resolved))
|
||||||
|
good = true
|
||||||
|
case float64:
|
||||||
|
out.SetFloat(resolved)
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
case reflect.Ptr:
|
||||||
|
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||||
|
// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
|
||||||
|
elem := reflect.New(out.Type().Elem())
|
||||||
|
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||||
|
out.Set(elem)
|
||||||
|
good = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !good {
|
||||||
|
d.terror(n, tag, out)
|
||||||
|
}
|
||||||
|
return good
|
||||||
|
}
|
||||||
|
|
||||||
|
func settableValueOf(i interface{}) reflect.Value {
|
||||||
|
v := reflect.ValueOf(i)
|
||||||
|
sv := reflect.New(v.Type()).Elem()
|
||||||
|
sv.Set(v)
|
||||||
|
return sv
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||||
|
l := len(n.children)
|
||||||
|
|
||||||
|
var iface reflect.Value
|
||||||
|
switch out.Kind() {
|
||||||
|
case reflect.Slice:
|
||||||
|
out.Set(reflect.MakeSlice(out.Type(), l, l))
|
||||||
|
case reflect.Interface:
|
||||||
|
// No type hints. Will have to use a generic sequence.
|
||||||
|
iface = out
|
||||||
|
out = settableValueOf(make([]interface{}, l))
|
||||||
|
default:
|
||||||
|
d.terror(n, yaml_SEQ_TAG, out)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
et := out.Type().Elem()
|
||||||
|
|
||||||
|
j := 0
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
e := reflect.New(et).Elem()
|
||||||
|
if ok := d.unmarshal(n.children[i], e); ok {
|
||||||
|
out.Index(j).Set(e)
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out.Set(out.Slice(0, j))
|
||||||
|
if iface.IsValid() {
|
||||||
|
iface.Set(out)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||||
|
switch out.Kind() {
|
||||||
|
case reflect.Struct:
|
||||||
|
return d.mappingStruct(n, out)
|
||||||
|
case reflect.Slice:
|
||||||
|
return d.mappingSlice(n, out)
|
||||||
|
case reflect.Map:
|
||||||
|
// okay
|
||||||
|
case reflect.Interface:
|
||||||
|
if d.mapType.Kind() == reflect.Map {
|
||||||
|
iface := out
|
||||||
|
out = reflect.MakeMap(d.mapType)
|
||||||
|
iface.Set(out)
|
||||||
|
} else {
|
||||||
|
slicev := reflect.New(d.mapType).Elem()
|
||||||
|
if !d.mappingSlice(n, slicev) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
out.Set(slicev)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
d.terror(n, yaml_MAP_TAG, out)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
outt := out.Type()
|
||||||
|
kt := outt.Key()
|
||||||
|
et := outt.Elem()
|
||||||
|
|
||||||
|
mapType := d.mapType
|
||||||
|
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
|
||||||
|
d.mapType = outt
|
||||||
|
}
|
||||||
|
|
||||||
|
if out.IsNil() {
|
||||||
|
out.Set(reflect.MakeMap(outt))
|
||||||
|
}
|
||||||
|
l := len(n.children)
|
||||||
|
for i := 0; i < l; i += 2 {
|
||||||
|
if isMerge(n.children[i]) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
k := reflect.New(kt).Elem()
|
||||||
|
if d.unmarshal(n.children[i], k) {
|
||||||
|
kkind := k.Kind()
|
||||||
|
if kkind == reflect.Interface {
|
||||||
|
kkind = k.Elem().Kind()
|
||||||
|
}
|
||||||
|
if kkind == reflect.Map || kkind == reflect.Slice {
|
||||||
|
failf("invalid map key: %#v", k.Interface())
|
||||||
|
}
|
||||||
|
e := reflect.New(et).Elem()
|
||||||
|
if d.unmarshal(n.children[i+1], e) {
|
||||||
|
out.SetMapIndex(k, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.mapType = mapType
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||||
|
outt := out.Type()
|
||||||
|
if outt.Elem() != mapItemType {
|
||||||
|
d.terror(n, yaml_MAP_TAG, out)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
mapType := d.mapType
|
||||||
|
d.mapType = outt
|
||||||
|
|
||||||
|
var slice []MapItem
|
||||||
|
var l = len(n.children)
|
||||||
|
for i := 0; i < l; i += 2 {
|
||||||
|
if isMerge(n.children[i]) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
item := MapItem{}
|
||||||
|
k := reflect.ValueOf(&item.Key).Elem()
|
||||||
|
if d.unmarshal(n.children[i], k) {
|
||||||
|
v := reflect.ValueOf(&item.Value).Elem()
|
||||||
|
if d.unmarshal(n.children[i+1], v) {
|
||||||
|
slice = append(slice, item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out.Set(reflect.ValueOf(slice))
|
||||||
|
d.mapType = mapType
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||||
|
sinfo, err := getStructInfo(out.Type())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
name := settableValueOf("")
|
||||||
|
l := len(n.children)
|
||||||
|
|
||||||
|
var inlineMap reflect.Value
|
||||||
|
var elemType reflect.Type
|
||||||
|
if sinfo.InlineMap != -1 {
|
||||||
|
inlineMap = out.Field(sinfo.InlineMap)
|
||||||
|
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
|
||||||
|
elemType = inlineMap.Type().Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < l; i += 2 {
|
||||||
|
ni := n.children[i]
|
||||||
|
if isMerge(ni) {
|
||||||
|
d.merge(n.children[i+1], out)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !d.unmarshal(ni, name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||||
|
var field reflect.Value
|
||||||
|
if info.Inline == nil {
|
||||||
|
field = out.Field(info.Num)
|
||||||
|
} else {
|
||||||
|
field = out.FieldByIndex(info.Inline)
|
||||||
|
}
|
||||||
|
d.unmarshal(n.children[i+1], field)
|
||||||
|
} else if sinfo.InlineMap != -1 {
|
||||||
|
if inlineMap.IsNil() {
|
||||||
|
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||||
|
}
|
||||||
|
value := reflect.New(elemType).Elem()
|
||||||
|
d.unmarshal(n.children[i+1], value)
|
||||||
|
inlineMap.SetMapIndex(name, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func failWantMap() {
|
||||||
|
failf("map merge requires map or sequence of maps as the value")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||||
|
switch n.kind {
|
||||||
|
case mappingNode:
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case aliasNode:
|
||||||
|
an, ok := d.doc.anchors[n.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
failWantMap()
|
||||||
|
}
|
||||||
|
d.unmarshal(n, out)
|
||||||
|
case sequenceNode:
|
||||||
|
// Step backwards as earlier nodes take precedence.
|
||||||
|
for i := len(n.children) - 1; i >= 0; i-- {
|
||||||
|
ni := n.children[i]
|
||||||
|
if ni.kind == aliasNode {
|
||||||
|
an, ok := d.doc.anchors[ni.value]
|
||||||
|
if ok && an.kind != mappingNode {
|
||||||
|
failWantMap()
|
||||||
|
}
|
||||||
|
} else if ni.kind != mappingNode {
|
||||||
|
failWantMap()
|
||||||
|
}
|
||||||
|
d.unmarshal(ni, out)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
failWantMap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMerge(n *node) bool {
|
||||||
|
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||||
|
}
|
966
Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go
generated
vendored
Normal file
966
Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go
generated
vendored
Normal file
|
@ -0,0 +1,966 @@
|
||||||
|
package yaml_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
. "gopkg.in/check.v1"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var unmarshalIntTest = 123
|
||||||
|
|
||||||
|
var unmarshalTests = []struct {
|
||||||
|
data string
|
||||||
|
value interface{}
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"",
|
||||||
|
&struct{}{},
|
||||||
|
}, {
|
||||||
|
"{}", &struct{}{},
|
||||||
|
}, {
|
||||||
|
"v: hi",
|
||||||
|
map[string]string{"v": "hi"},
|
||||||
|
}, {
|
||||||
|
"v: hi", map[string]interface{}{"v": "hi"},
|
||||||
|
}, {
|
||||||
|
"v: true",
|
||||||
|
map[string]string{"v": "true"},
|
||||||
|
}, {
|
||||||
|
"v: true",
|
||||||
|
map[string]interface{}{"v": true},
|
||||||
|
}, {
|
||||||
|
"v: 10",
|
||||||
|
map[string]interface{}{"v": 10},
|
||||||
|
}, {
|
||||||
|
"v: 0b10",
|
||||||
|
map[string]interface{}{"v": 2},
|
||||||
|
}, {
|
||||||
|
"v: 0xA",
|
||||||
|
map[string]interface{}{"v": 10},
|
||||||
|
}, {
|
||||||
|
"v: 4294967296",
|
||||||
|
map[string]int64{"v": 4294967296},
|
||||||
|
}, {
|
||||||
|
"v: 0.1",
|
||||||
|
map[string]interface{}{"v": 0.1},
|
||||||
|
}, {
|
||||||
|
"v: .1",
|
||||||
|
map[string]interface{}{"v": 0.1},
|
||||||
|
}, {
|
||||||
|
"v: .Inf",
|
||||||
|
map[string]interface{}{"v": math.Inf(+1)},
|
||||||
|
}, {
|
||||||
|
"v: -.Inf",
|
||||||
|
map[string]interface{}{"v": math.Inf(-1)},
|
||||||
|
}, {
|
||||||
|
"v: -10",
|
||||||
|
map[string]interface{}{"v": -10},
|
||||||
|
}, {
|
||||||
|
"v: -.1",
|
||||||
|
map[string]interface{}{"v": -0.1},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Simple values.
|
||||||
|
{
|
||||||
|
"123",
|
||||||
|
&unmarshalIntTest,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Floats from spec
|
||||||
|
{
|
||||||
|
"canonical: 6.8523e+5",
|
||||||
|
map[string]interface{}{"canonical": 6.8523e+5},
|
||||||
|
}, {
|
||||||
|
"expo: 685.230_15e+03",
|
||||||
|
map[string]interface{}{"expo": 685.23015e+03},
|
||||||
|
}, {
|
||||||
|
"fixed: 685_230.15",
|
||||||
|
map[string]interface{}{"fixed": 685230.15},
|
||||||
|
}, {
|
||||||
|
"neginf: -.inf",
|
||||||
|
map[string]interface{}{"neginf": math.Inf(-1)},
|
||||||
|
}, {
|
||||||
|
"fixed: 685_230.15",
|
||||||
|
map[string]float64{"fixed": 685230.15},
|
||||||
|
},
|
||||||
|
//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||||
|
//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
|
||||||
|
|
||||||
|
// Bools from spec
|
||||||
|
{
|
||||||
|
"canonical: y",
|
||||||
|
map[string]interface{}{"canonical": true},
|
||||||
|
}, {
|
||||||
|
"answer: NO",
|
||||||
|
map[string]interface{}{"answer": false},
|
||||||
|
}, {
|
||||||
|
"logical: True",
|
||||||
|
map[string]interface{}{"logical": true},
|
||||||
|
}, {
|
||||||
|
"option: on",
|
||||||
|
map[string]interface{}{"option": true},
|
||||||
|
}, {
|
||||||
|
"option: on",
|
||||||
|
map[string]bool{"option": true},
|
||||||
|
},
|
||||||
|
// Ints from spec
|
||||||
|
{
|
||||||
|
"canonical: 685230",
|
||||||
|
map[string]interface{}{"canonical": 685230},
|
||||||
|
}, {
|
||||||
|
"decimal: +685_230",
|
||||||
|
map[string]interface{}{"decimal": 685230},
|
||||||
|
}, {
|
||||||
|
"octal: 02472256",
|
||||||
|
map[string]interface{}{"octal": 685230},
|
||||||
|
}, {
|
||||||
|
"hexa: 0x_0A_74_AE",
|
||||||
|
map[string]interface{}{"hexa": 685230},
|
||||||
|
}, {
|
||||||
|
"bin: 0b1010_0111_0100_1010_1110",
|
||||||
|
map[string]interface{}{"bin": 685230},
|
||||||
|
}, {
|
||||||
|
"bin: -0b101010",
|
||||||
|
map[string]interface{}{"bin": -42},
|
||||||
|
}, {
|
||||||
|
"decimal: +685_230",
|
||||||
|
map[string]int{"decimal": 685230},
|
||||||
|
},
|
||||||
|
|
||||||
|
//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||||
|
|
||||||
|
// Nulls from spec
|
||||||
|
{
|
||||||
|
"empty:",
|
||||||
|
map[string]interface{}{"empty": nil},
|
||||||
|
}, {
|
||||||
|
"canonical: ~",
|
||||||
|
map[string]interface{}{"canonical": nil},
|
||||||
|
}, {
|
||||||
|
"english: null",
|
||||||
|
map[string]interface{}{"english": nil},
|
||||||
|
}, {
|
||||||
|
"~: null key",
|
||||||
|
map[interface{}]string{nil: "null key"},
|
||||||
|
}, {
|
||||||
|
"empty:",
|
||||||
|
map[string]*bool{"empty": nil},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Flow sequence
|
||||||
|
{
|
||||||
|
"seq: [A,B]",
|
||||||
|
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||||
|
}, {
|
||||||
|
"seq: [A,B,C,]",
|
||||||
|
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||||
|
}, {
|
||||||
|
"seq: [A,1,C]",
|
||||||
|
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||||
|
}, {
|
||||||
|
"seq: [A,1,C]",
|
||||||
|
map[string][]int{"seq": []int{1}},
|
||||||
|
}, {
|
||||||
|
"seq: [A,1,C]",
|
||||||
|
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||||
|
},
|
||||||
|
// Block sequence
|
||||||
|
{
|
||||||
|
"seq:\n - A\n - B",
|
||||||
|
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||||
|
}, {
|
||||||
|
"seq:\n - A\n - B\n - C",
|
||||||
|
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||||
|
}, {
|
||||||
|
"seq:\n - A\n - 1\n - C",
|
||||||
|
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||||
|
}, {
|
||||||
|
"seq:\n - A\n - 1\n - C",
|
||||||
|
map[string][]int{"seq": []int{1}},
|
||||||
|
}, {
|
||||||
|
"seq:\n - A\n - 1\n - C",
|
||||||
|
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Literal block scalar
|
||||||
|
{
|
||||||
|
"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
|
||||||
|
map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Folded block scalar
|
||||||
|
{
|
||||||
|
"scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
|
||||||
|
map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Map inside interface with no type hints.
|
||||||
|
{
|
||||||
|
"a: {b: c}",
|
||||||
|
map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Structs and type conversions.
|
||||||
|
{
|
||||||
|
"hello: world",
|
||||||
|
&struct{ Hello string }{"world"},
|
||||||
|
}, {
|
||||||
|
"a: {b: c}",
|
||||||
|
&struct{ A struct{ B string } }{struct{ B string }{"c"}},
|
||||||
|
}, {
|
||||||
|
"a: {b: c}",
|
||||||
|
&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
|
||||||
|
}, {
|
||||||
|
"a: {b: c}",
|
||||||
|
&struct{ A map[string]string }{map[string]string{"b": "c"}},
|
||||||
|
}, {
|
||||||
|
"a: {b: c}",
|
||||||
|
&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
|
||||||
|
}, {
|
||||||
|
"a:",
|
||||||
|
&struct{ A map[string]string }{},
|
||||||
|
}, {
|
||||||
|
"a: 1",
|
||||||
|
&struct{ A int }{1},
|
||||||
|
}, {
|
||||||
|
"a: 1",
|
||||||
|
&struct{ A float64 }{1},
|
||||||
|
}, {
|
||||||
|
"a: 1.0",
|
||||||
|
&struct{ A int }{1},
|
||||||
|
}, {
|
||||||
|
"a: 1.0",
|
||||||
|
&struct{ A uint }{1},
|
||||||
|
}, {
|
||||||
|
"a: [1, 2]",
|
||||||
|
&struct{ A []int }{[]int{1, 2}},
|
||||||
|
}, {
|
||||||
|
"a: 1",
|
||||||
|
&struct{ B int }{0},
|
||||||
|
}, {
|
||||||
|
"a: 1",
|
||||||
|
&struct {
|
||||||
|
B int "a"
|
||||||
|
}{1},
|
||||||
|
}, {
|
||||||
|
"a: y",
|
||||||
|
&struct{ A bool }{true},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Some cross type conversions
|
||||||
|
{
|
||||||
|
"v: 42",
|
||||||
|
map[string]uint{"v": 42},
|
||||||
|
}, {
|
||||||
|
"v: -42",
|
||||||
|
map[string]uint{},
|
||||||
|
}, {
|
||||||
|
"v: 4294967296",
|
||||||
|
map[string]uint64{"v": 4294967296},
|
||||||
|
}, {
|
||||||
|
"v: -4294967296",
|
||||||
|
map[string]uint64{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// int
|
||||||
|
{
|
||||||
|
"int_max: 2147483647",
|
||||||
|
map[string]int{"int_max": math.MaxInt32},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int_min: -2147483648",
|
||||||
|
map[string]int{"int_min": math.MinInt32},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||||
|
map[string]int{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// int64
|
||||||
|
{
|
||||||
|
"int64_max: 9223372036854775807",
|
||||||
|
map[string]int64{"int64_max": math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
|
||||||
|
map[string]int64{"int64_max_base2": math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64_min: -9223372036854775808",
|
||||||
|
map[string]int64{"int64_min": math.MinInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
|
||||||
|
map[string]int64{"int64_neg_base2": -math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||||
|
map[string]int64{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// uint
|
||||||
|
{
|
||||||
|
"uint_min: 0",
|
||||||
|
map[string]uint{"uint_min": 0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint_max: 4294967295",
|
||||||
|
map[string]uint{"uint_max": math.MaxUint32},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint_underflow: -1",
|
||||||
|
map[string]uint{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// uint64
|
||||||
|
{
|
||||||
|
"uint64_min: 0",
|
||||||
|
map[string]uint{"uint64_min": 0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint64_max: 18446744073709551615",
|
||||||
|
map[string]uint64{"uint64_max": math.MaxUint64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||||
|
map[string]uint64{"uint64_max_base2": math.MaxUint64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint64_maxint64: 9223372036854775807",
|
||||||
|
map[string]uint64{"uint64_maxint64": math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"uint64_underflow: -1",
|
||||||
|
map[string]uint64{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// float32
|
||||||
|
{
|
||||||
|
"float32_max: 3.40282346638528859811704183484516925440e+38",
|
||||||
|
map[string]float32{"float32_max": math.MaxFloat32},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
|
||||||
|
map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float32_maxuint64: 18446744073709551615",
|
||||||
|
map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float32_maxuint64+1: 18446744073709551616",
|
||||||
|
map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
|
||||||
|
},
|
||||||
|
|
||||||
|
// float64
|
||||||
|
{
|
||||||
|
"float64_max: 1.797693134862315708145274237317043567981e+308",
|
||||||
|
map[string]float64{"float64_max": math.MaxFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
|
||||||
|
map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float64_maxuint64: 18446744073709551615",
|
||||||
|
map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float64_maxuint64+1: 18446744073709551616",
|
||||||
|
map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Overflow cases.
|
||||||
|
{
|
||||||
|
"v: 4294967297",
|
||||||
|
map[string]int32{},
|
||||||
|
}, {
|
||||||
|
"v: 128",
|
||||||
|
map[string]int8{},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Quoted values.
|
||||||
|
{
|
||||||
|
"'1': '\"2\"'",
|
||||||
|
map[interface{}]interface{}{"1": "\"2\""},
|
||||||
|
}, {
|
||||||
|
"v:\n- A\n- 'B\n\n C'\n",
|
||||||
|
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Explicit tags.
|
||||||
|
{
|
||||||
|
"v: !!float '1.1'",
|
||||||
|
map[string]interface{}{"v": 1.1},
|
||||||
|
}, {
|
||||||
|
"v: !!null ''",
|
||||||
|
map[string]interface{}{"v": nil},
|
||||||
|
}, {
|
||||||
|
"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
|
||||||
|
map[string]interface{}{"v": 1},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Anchors and aliases.
|
||||||
|
{
|
||||||
|
"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
|
||||||
|
&struct{ A, B, C, D int }{1, 2, 1, 2},
|
||||||
|
}, {
|
||||||
|
"a: &a {c: 1}\nb: *a",
|
||||||
|
&struct {
|
||||||
|
A, B struct {
|
||||||
|
C int
|
||||||
|
}
|
||||||
|
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||||
|
}, {
|
||||||
|
"a: &a [1, 2]\nb: *a",
|
||||||
|
&struct{ B []int }{[]int{1, 2}},
|
||||||
|
}, {
|
||||||
|
"b: *a\na: &a {c: 1}",
|
||||||
|
&struct {
|
||||||
|
A, B struct {
|
||||||
|
C int
|
||||||
|
}
|
||||||
|
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Bug #1133337
|
||||||
|
{
|
||||||
|
"foo: ''",
|
||||||
|
map[string]*string{"foo": new(string)},
|
||||||
|
}, {
|
||||||
|
"foo: null",
|
||||||
|
map[string]string{"foo": ""},
|
||||||
|
}, {
|
||||||
|
"foo: null",
|
||||||
|
map[string]interface{}{"foo": nil},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Ignored field
|
||||||
|
{
|
||||||
|
"a: 1\nb: 2\n",
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
B int "-"
|
||||||
|
}{1, 0},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Bug #1191981
|
||||||
|
{
|
||||||
|
"" +
|
||||||
|
"%YAML 1.1\n" +
|
||||||
|
"--- !!str\n" +
|
||||||
|
`"Generic line break (no glyph)\n\` + "\n" +
|
||||||
|
` Generic line break (glyphed)\n\` + "\n" +
|
||||||
|
` Line separator\u2028\` + "\n" +
|
||||||
|
` Paragraph separator\u2029"` + "\n",
|
||||||
|
"" +
|
||||||
|
"Generic line break (no glyph)\n" +
|
||||||
|
"Generic line break (glyphed)\n" +
|
||||||
|
"Line separator\u2028Paragraph separator\u2029",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Struct inlining
|
||||||
|
{
|
||||||
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
C inlineB `yaml:",inline"`
|
||||||
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Map inlining
|
||||||
|
{
|
||||||
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
C map[string]int `yaml:",inline"`
|
||||||
|
}{1, map[string]int{"b": 2, "c": 3}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// bug 1243827
|
||||||
|
{
|
||||||
|
"a: -b_c",
|
||||||
|
map[string]interface{}{"a": "-b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: +b_c",
|
||||||
|
map[string]interface{}{"a": "+b_c"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: 50cent_of_dollar",
|
||||||
|
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
"a: 3s",
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24.
|
||||||
|
{
|
||||||
|
"a: <foo>",
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Base 60 floats are obsolete and unsupported.
|
||||||
|
{
|
||||||
|
"a: 1:1\n",
|
||||||
|
map[string]string{"a": "1:1"},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Binary data.
|
||||||
|
{
|
||||||
|
"a: !!binary gIGC\n",
|
||||||
|
map[string]string{"a": "\x80\x81\x82"},
|
||||||
|
}, {
|
||||||
|
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||||
|
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||||
|
}, {
|
||||||
|
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
|
||||||
|
map[string]string{"a": strings.Repeat("\x00", 52)},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Ordered maps.
|
||||||
|
{
|
||||||
|
"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
|
||||||
|
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #39.
|
||||||
|
{
|
||||||
|
"a:\n b:\n c: d\n",
|
||||||
|
map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Custom map type.
|
||||||
|
{
|
||||||
|
"a: {b: c}",
|
||||||
|
M{"a": M{"b": "c"}},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Support encoding.TextUnmarshaler.
|
||||||
|
{
|
||||||
|
"a: 1.2.3.4\n",
|
||||||
|
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a: 2015-02-24T18:19:39Z\n",
|
||||||
|
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Encode empty lists as zero-length slices.
|
||||||
|
{
|
||||||
|
"a: []",
|
||||||
|
&struct{ A []int }{[]int{}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type M map[interface{}]interface{}
|
||||||
|
|
||||||
|
type inlineB struct {
|
||||||
|
B int
|
||||||
|
inlineC `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type inlineC struct {
|
||||||
|
C int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshal(c *C) {
|
||||||
|
for _, item := range unmarshalTests {
|
||||||
|
t := reflect.ValueOf(item.value).Type()
|
||||||
|
var value interface{}
|
||||||
|
switch t.Kind() {
|
||||||
|
case reflect.Map:
|
||||||
|
value = reflect.MakeMap(t).Interface()
|
||||||
|
case reflect.String:
|
||||||
|
value = reflect.New(t).Interface()
|
||||||
|
case reflect.Ptr:
|
||||||
|
value = reflect.New(t.Elem()).Interface()
|
||||||
|
default:
|
||||||
|
c.Fatalf("missing case for %s", t)
|
||||||
|
}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), value)
|
||||||
|
if _, ok := err.(*yaml.TypeError); !ok {
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
}
|
||||||
|
if t.Kind() == reflect.String {
|
||||||
|
c.Assert(*value.(*string), Equals, item.value)
|
||||||
|
} else {
|
||||||
|
c.Assert(value, DeepEquals, item.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalNaN(c *C) {
|
||||||
|
value := map[string]interface{}{}
|
||||||
|
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalErrorTests = []struct {
|
||||||
|
data, error string
|
||||||
|
}{
|
||||||
|
{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
|
||||||
|
{"v: [A,", "yaml: line 1: did not find expected node content"},
|
||||||
|
{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
|
||||||
|
{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
|
||||||
|
{"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
|
||||||
|
{"value: -", "yaml: block sequence entries are not allowed in this context"},
|
||||||
|
{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
|
||||||
|
{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
|
||||||
|
{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalErrors(c *C) {
|
||||||
|
for _, item := range unmarshalErrorTests {
|
||||||
|
var value interface{}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||||
|
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalerTests = []struct {
|
||||||
|
data, tag string
|
||||||
|
value interface{}
|
||||||
|
}{
|
||||||
|
{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
|
||||||
|
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||||
|
{"_: 10", "!!int", 10},
|
||||||
|
{"_: null", "!!null", nil},
|
||||||
|
{`_: BAR!`, "!!str", "BAR!"},
|
||||||
|
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||||
|
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalerResult = map[int]error{}
|
||||||
|
|
||||||
|
type unmarshalerType struct {
|
||||||
|
value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
|
||||||
|
if err := unmarshal(&o.value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if i, ok := o.value.(int); ok {
|
||||||
|
if result, ok := unmarshalerResult[i]; ok {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type unmarshalerPointer struct {
|
||||||
|
Field *unmarshalerType "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
type unmarshalerValue struct {
|
||||||
|
Field unmarshalerType "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerPointerField(c *C) {
|
||||||
|
for _, item := range unmarshalerTests {
|
||||||
|
obj := &unmarshalerPointer{}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
if item.value == nil {
|
||||||
|
c.Assert(obj.Field, IsNil)
|
||||||
|
} else {
|
||||||
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerValueField(c *C) {
|
||||||
|
for _, item := range unmarshalerTests {
|
||||||
|
obj := &unmarshalerValue{}
|
||||||
|
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||||
|
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerWholeDocument(c *C) {
|
||||||
|
obj := &unmarshalerType{}
|
||||||
|
err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
value, ok := obj.value.(map[interface{}]interface{})
|
||||||
|
c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
|
||||||
|
c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerTypeError(c *C) {
|
||||||
|
unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
|
||||||
|
unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
|
||||||
|
defer func() {
|
||||||
|
delete(unmarshalerResult, 2)
|
||||||
|
delete(unmarshalerResult, 4)
|
||||||
|
}()
|
||||||
|
|
||||||
|
type T struct {
|
||||||
|
Before int
|
||||||
|
After int
|
||||||
|
M map[string]*unmarshalerType
|
||||||
|
}
|
||||||
|
var v T
|
||||||
|
data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
|
||||||
|
err := yaml.Unmarshal([]byte(data), &v)
|
||||||
|
c.Assert(err, ErrorMatches, ""+
|
||||||
|
"yaml: unmarshal errors:\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||||
|
" foo\n"+
|
||||||
|
" bar\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `B` into int")
|
||||||
|
c.Assert(v.M["abc"], NotNil)
|
||||||
|
c.Assert(v.M["def"], IsNil)
|
||||||
|
c.Assert(v.M["ghi"], NotNil)
|
||||||
|
c.Assert(v.M["jkl"], IsNil)
|
||||||
|
|
||||||
|
c.Assert(v.M["abc"].value, Equals, 1)
|
||||||
|
c.Assert(v.M["ghi"].value, Equals, 3)
|
||||||
|
}
|
||||||
|
|
||||||
|
type proxyTypeError struct{}
|
||||||
|
|
||||||
|
func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var s string
|
||||||
|
var a int32
|
||||||
|
var b int64
|
||||||
|
if err := unmarshal(&s); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if s == "a" {
|
||||||
|
if err := unmarshal(&b); err == nil {
|
||||||
|
panic("should have failed")
|
||||||
|
}
|
||||||
|
return unmarshal(&a)
|
||||||
|
}
|
||||||
|
if err := unmarshal(&a); err == nil {
|
||||||
|
panic("should have failed")
|
||||||
|
}
|
||||||
|
return unmarshal(&b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
|
||||||
|
type T struct {
|
||||||
|
Before int
|
||||||
|
After int
|
||||||
|
M map[string]*proxyTypeError
|
||||||
|
}
|
||||||
|
var v T
|
||||||
|
data := `{before: A, m: {abc: a, def: b}, after: B}`
|
||||||
|
err := yaml.Unmarshal([]byte(data), &v)
|
||||||
|
c.Assert(err, ErrorMatches, ""+
|
||||||
|
"yaml: unmarshal errors:\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `a` into int32\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `b` into int64\n"+
|
||||||
|
" line 1: cannot unmarshal !!str `B` into int")
|
||||||
|
}
|
||||||
|
|
||||||
|
type failingUnmarshaler struct{}
|
||||||
|
|
||||||
|
var failingErr = errors.New("failingErr")
|
||||||
|
|
||||||
|
func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
return failingErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerError(c *C) {
|
||||||
|
err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
|
||||||
|
c.Assert(err, Equals, failingErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
type sliceUnmarshaler []int
|
||||||
|
|
||||||
|
func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var slice []int
|
||||||
|
err := unmarshal(&slice)
|
||||||
|
if err == nil {
|
||||||
|
*su = slice
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var intVal int
|
||||||
|
err = unmarshal(&intVal)
|
||||||
|
if err == nil {
|
||||||
|
*su = []int{intVal}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalerRetry(c *C) {
|
||||||
|
var su sliceUnmarshaler
|
||||||
|
err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
|
||||||
|
|
||||||
|
err = yaml.Unmarshal([]byte("1"), &su)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
|
||||||
|
}
|
||||||
|
|
||||||
|
// From http://yaml.org/type/merge.html
|
||||||
|
var mergeTests = `
|
||||||
|
anchors:
|
||||||
|
list:
|
||||||
|
- &CENTER { "x": 1, "y": 2 }
|
||||||
|
- &LEFT { "x": 0, "y": 2 }
|
||||||
|
- &BIG { "r": 10 }
|
||||||
|
- &SMALL { "r": 1 }
|
||||||
|
|
||||||
|
# All the following maps are equal:
|
||||||
|
|
||||||
|
plain:
|
||||||
|
# Explicit keys
|
||||||
|
"x": 1
|
||||||
|
"y": 2
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeOne:
|
||||||
|
# Merge one map
|
||||||
|
<< : *CENTER
|
||||||
|
"r": 10
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
mergeMultiple:
|
||||||
|
# Merge multiple maps
|
||||||
|
<< : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
override:
|
||||||
|
# Override
|
||||||
|
<< : [ *BIG, *LEFT, *SMALL ]
|
||||||
|
"x": 1
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
shortTag:
|
||||||
|
# Explicit short merge tag
|
||||||
|
!!merge "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
longTag:
|
||||||
|
# Explicit merge long tag
|
||||||
|
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineMap:
|
||||||
|
# Inlined map
|
||||||
|
<< : {"x": 1, "y": 2, "r": 10}
|
||||||
|
label: center/big
|
||||||
|
|
||||||
|
inlineSequenceMap:
|
||||||
|
# Inlined map in sequence
|
||||||
|
<< : [ *CENTER, {"r": 10} ]
|
||||||
|
label: center/big
|
||||||
|
`
|
||||||
|
|
||||||
|
func (s *S) TestMerge(c *C) {
|
||||||
|
var want = map[interface{}]interface{}{
|
||||||
|
"x": 1,
|
||||||
|
"y": 2,
|
||||||
|
"r": 10,
|
||||||
|
"label": "center/big",
|
||||||
|
}
|
||||||
|
|
||||||
|
var m map[interface{}]interface{}
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMergeStruct(c *C) {
|
||||||
|
type Data struct {
|
||||||
|
X, Y, R int
|
||||||
|
Label string
|
||||||
|
}
|
||||||
|
want := Data{1, 2, 10, "center/big"}
|
||||||
|
|
||||||
|
var m map[string]Data
|
||||||
|
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
for name, test := range m {
|
||||||
|
if name == "anchors" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var unmarshalNullTests = []func() interface{}{
|
||||||
|
func() interface{} { var v interface{}; v = "v"; return &v },
|
||||||
|
func() interface{} { var s = "s"; return &s },
|
||||||
|
func() interface{} { var s = "s"; sptr := &s; return &sptr },
|
||||||
|
func() interface{} { var i = 1; return &i },
|
||||||
|
func() interface{} { var i = 1; iptr := &i; return &iptr },
|
||||||
|
func() interface{} { m := map[string]int{"s": 1}; return &m },
|
||||||
|
func() interface{} { m := map[string]int{"s": 1}; return m },
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalNull(c *C) {
|
||||||
|
for _, test := range unmarshalNullTests {
|
||||||
|
item := test()
|
||||||
|
zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
|
||||||
|
err := yaml.Unmarshal([]byte("null"), item)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
if reflect.TypeOf(item).Kind() == reflect.Map {
|
||||||
|
c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
|
||||||
|
} else {
|
||||||
|
c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestUnmarshalSliceOnPreset(c *C) {
|
||||||
|
// Issue #48.
|
||||||
|
v := struct{ A []int }{[]int{1}}
|
||||||
|
yaml.Unmarshal([]byte("a: [2]"), &v)
|
||||||
|
c.Assert(v.A, DeepEquals, []int{2})
|
||||||
|
}
|
||||||
|
|
||||||
|
//var data []byte
|
||||||
|
//func init() {
|
||||||
|
// var err error
|
||||||
|
// data, err = ioutil.ReadFile("/tmp/file.yaml")
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (s *S) BenchmarkUnmarshal(c *C) {
|
||||||
|
// var err error
|
||||||
|
// for i := 0; i < c.N; i++ {
|
||||||
|
// var v map[string]interface{}
|
||||||
|
// err = yaml.Unmarshal(data, &v)
|
||||||
|
// }
|
||||||
|
// if err != nil {
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (s *S) BenchmarkMarshal(c *C) {
|
||||||
|
// var v map[string]interface{}
|
||||||
|
// yaml.Unmarshal(data, &v)
|
||||||
|
// c.ResetTimer()
|
||||||
|
// for i := 0; i < c.N; i++ {
|
||||||
|
// yaml.Marshal(&v)
|
||||||
|
// }
|
||||||
|
//}
|
1685
Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
generated
vendored
Normal file
1685
Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
306
Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
generated
vendored
Normal file
306
Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type encoder struct {
|
||||||
|
emitter yaml_emitter_t
|
||||||
|
event yaml_event_t
|
||||||
|
out []byte
|
||||||
|
flow bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEncoder() (e *encoder) {
|
||||||
|
e = &encoder{}
|
||||||
|
e.must(yaml_emitter_initialize(&e.emitter))
|
||||||
|
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||||||
|
yaml_emitter_set_unicode(&e.emitter, true)
|
||||||
|
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
|
||||||
|
e.emit()
|
||||||
|
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
|
||||||
|
e.emit()
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) finish() {
|
||||||
|
e.must(yaml_document_end_event_initialize(&e.event, true))
|
||||||
|
e.emit()
|
||||||
|
e.emitter.open_ended = false
|
||||||
|
e.must(yaml_stream_end_event_initialize(&e.event))
|
||||||
|
e.emit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) destroy() {
|
||||||
|
yaml_emitter_delete(&e.emitter)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) emit() {
|
||||||
|
// This will internally delete the e.event value.
|
||||||
|
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
|
||||||
|
e.must(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) must(ok bool) {
|
||||||
|
if !ok {
|
||||||
|
msg := e.emitter.problem
|
||||||
|
if msg == "" {
|
||||||
|
msg = "unknown problem generating YAML content"
|
||||||
|
}
|
||||||
|
failf("%s", msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||||
|
if !in.IsValid() {
|
||||||
|
e.nilv()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
iface := in.Interface()
|
||||||
|
if m, ok := iface.(Marshaler); ok {
|
||||||
|
v, err := m.MarshalYAML()
|
||||||
|
if err != nil {
|
||||||
|
fail(err)
|
||||||
|
}
|
||||||
|
if v == nil {
|
||||||
|
e.nilv()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
in = reflect.ValueOf(v)
|
||||||
|
} else if m, ok := iface.(encoding.TextMarshaler); ok {
|
||||||
|
text, err := m.MarshalText()
|
||||||
|
if err != nil {
|
||||||
|
fail(err)
|
||||||
|
}
|
||||||
|
in = reflect.ValueOf(string(text))
|
||||||
|
}
|
||||||
|
switch in.Kind() {
|
||||||
|
case reflect.Interface:
|
||||||
|
if in.IsNil() {
|
||||||
|
e.nilv()
|
||||||
|
} else {
|
||||||
|
e.marshal(tag, in.Elem())
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
e.mapv(tag, in)
|
||||||
|
case reflect.Ptr:
|
||||||
|
if in.IsNil() {
|
||||||
|
e.nilv()
|
||||||
|
} else {
|
||||||
|
e.marshal(tag, in.Elem())
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
e.structv(tag, in)
|
||||||
|
case reflect.Slice:
|
||||||
|
if in.Type().Elem() == mapItemType {
|
||||||
|
e.itemsv(tag, in)
|
||||||
|
} else {
|
||||||
|
e.slicev(tag, in)
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
e.stringv(tag, in)
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
if in.Type() == durationType {
|
||||||
|
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
|
||||||
|
} else {
|
||||||
|
e.intv(tag, in)
|
||||||
|
}
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
e.uintv(tag, in)
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
e.floatv(tag, in)
|
||||||
|
case reflect.Bool:
|
||||||
|
e.boolv(tag, in)
|
||||||
|
default:
|
||||||
|
panic("cannot marshal type: " + in.Type().String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) mapv(tag string, in reflect.Value) {
|
||||||
|
e.mappingv(tag, func() {
|
||||||
|
keys := keyList(in.MapKeys())
|
||||||
|
sort.Sort(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
e.marshal("", k)
|
||||||
|
e.marshal("", in.MapIndex(k))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) itemsv(tag string, in reflect.Value) {
|
||||||
|
e.mappingv(tag, func() {
|
||||||
|
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
|
||||||
|
for _, item := range slice {
|
||||||
|
e.marshal("", reflect.ValueOf(item.Key))
|
||||||
|
e.marshal("", reflect.ValueOf(item.Value))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) structv(tag string, in reflect.Value) {
|
||||||
|
sinfo, err := getStructInfo(in.Type())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
e.mappingv(tag, func() {
|
||||||
|
for _, info := range sinfo.FieldsList {
|
||||||
|
var value reflect.Value
|
||||||
|
if info.Inline == nil {
|
||||||
|
value = in.Field(info.Num)
|
||||||
|
} else {
|
||||||
|
value = in.FieldByIndex(info.Inline)
|
||||||
|
}
|
||||||
|
if info.OmitEmpty && isZero(value) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
e.marshal("", reflect.ValueOf(info.Key))
|
||||||
|
e.flow = info.Flow
|
||||||
|
e.marshal("", value)
|
||||||
|
}
|
||||||
|
if sinfo.InlineMap >= 0 {
|
||||||
|
m := in.Field(sinfo.InlineMap)
|
||||||
|
if m.Len() > 0 {
|
||||||
|
e.flow = false
|
||||||
|
keys := keyList(m.MapKeys())
|
||||||
|
sort.Sort(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
if _, found := sinfo.FieldsMap[k.String()]; found {
|
||||||
|
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
|
||||||
|
}
|
||||||
|
e.marshal("", k)
|
||||||
|
e.flow = false
|
||||||
|
e.marshal("", m.MapIndex(k))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) mappingv(tag string, f func()) {
|
||||||
|
implicit := tag == ""
|
||||||
|
style := yaml_BLOCK_MAPPING_STYLE
|
||||||
|
if e.flow {
|
||||||
|
e.flow = false
|
||||||
|
style = yaml_FLOW_MAPPING_STYLE
|
||||||
|
}
|
||||||
|
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||||
|
e.emit()
|
||||||
|
f()
|
||||||
|
e.must(yaml_mapping_end_event_initialize(&e.event))
|
||||||
|
e.emit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||||
|
implicit := tag == ""
|
||||||
|
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||||
|
if e.flow {
|
||||||
|
e.flow = false
|
||||||
|
style = yaml_FLOW_SEQUENCE_STYLE
|
||||||
|
}
|
||||||
|
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||||
|
e.emit()
|
||||||
|
n := in.Len()
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
e.marshal("", in.Index(i))
|
||||||
|
}
|
||||||
|
e.must(yaml_sequence_end_event_initialize(&e.event))
|
||||||
|
e.emit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||||||
|
//
|
||||||
|
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||||||
|
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||||||
|
// the time being for compatibility with other parsers.
|
||||||
|
func isBase60Float(s string) (result bool) {
|
||||||
|
// Fast path.
|
||||||
|
if s == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
c := s[0]
|
||||||
|
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Do the full match.
|
||||||
|
return base60float.MatchString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// From http://yaml.org/type/float.html, except the regular expression there
|
||||||
|
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||||||
|
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||||||
|
|
||||||
|
func (e *encoder) stringv(tag string, in reflect.Value) {
|
||||||
|
var style yaml_scalar_style_t
|
||||||
|
s := in.String()
|
||||||
|
rtag, rs := resolve("", s)
|
||||||
|
if rtag == yaml_BINARY_TAG {
|
||||||
|
if tag == "" || tag == yaml_STR_TAG {
|
||||||
|
tag = rtag
|
||||||
|
s = rs.(string)
|
||||||
|
} else if tag == yaml_BINARY_TAG {
|
||||||
|
failf("explicitly tagged !!binary data must be base64-encoded")
|
||||||
|
} else {
|
||||||
|
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
|
||||||
|
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||||
|
} else if strings.Contains(s, "\n") {
|
||||||
|
style = yaml_LITERAL_SCALAR_STYLE
|
||||||
|
} else {
|
||||||
|
style = yaml_PLAIN_SCALAR_STYLE
|
||||||
|
}
|
||||||
|
e.emitScalar(s, "", tag, style)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) boolv(tag string, in reflect.Value) {
|
||||||
|
var s string
|
||||||
|
if in.Bool() {
|
||||||
|
s = "true"
|
||||||
|
} else {
|
||||||
|
s = "false"
|
||||||
|
}
|
||||||
|
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) intv(tag string, in reflect.Value) {
|
||||||
|
s := strconv.FormatInt(in.Int(), 10)
|
||||||
|
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) uintv(tag string, in reflect.Value) {
|
||||||
|
s := strconv.FormatUint(in.Uint(), 10)
|
||||||
|
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) floatv(tag string, in reflect.Value) {
|
||||||
|
// FIXME: Handle 64 bits here.
|
||||||
|
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
|
||||||
|
switch s {
|
||||||
|
case "+Inf":
|
||||||
|
s = ".inf"
|
||||||
|
case "-Inf":
|
||||||
|
s = "-.inf"
|
||||||
|
case "NaN":
|
||||||
|
s = ".nan"
|
||||||
|
}
|
||||||
|
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) nilv() {
|
||||||
|
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
||||||
|
implicit := tag == ""
|
||||||
|
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
|
||||||
|
e.emit()
|
||||||
|
}
|
485
Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go
generated
vendored
Normal file
485
Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go
generated
vendored
Normal file
|
@ -0,0 +1,485 @@
|
||||||
|
package yaml_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
. "gopkg.in/check.v1"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var marshalIntTest = 123
|
||||||
|
|
||||||
|
var marshalTests = []struct {
|
||||||
|
value interface{}
|
||||||
|
data string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
nil,
|
||||||
|
"null\n",
|
||||||
|
}, {
|
||||||
|
&struct{}{},
|
||||||
|
"{}\n",
|
||||||
|
}, {
|
||||||
|
map[string]string{"v": "hi"},
|
||||||
|
"v: hi\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": "hi"},
|
||||||
|
"v: hi\n",
|
||||||
|
}, {
|
||||||
|
map[string]string{"v": "true"},
|
||||||
|
"v: \"true\"\n",
|
||||||
|
}, {
|
||||||
|
map[string]string{"v": "false"},
|
||||||
|
"v: \"false\"\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": true},
|
||||||
|
"v: true\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": false},
|
||||||
|
"v: false\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": 10},
|
||||||
|
"v: 10\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": -10},
|
||||||
|
"v: -10\n",
|
||||||
|
}, {
|
||||||
|
map[string]uint{"v": 42},
|
||||||
|
"v: 42\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": int64(4294967296)},
|
||||||
|
"v: 4294967296\n",
|
||||||
|
}, {
|
||||||
|
map[string]int64{"v": int64(4294967296)},
|
||||||
|
"v: 4294967296\n",
|
||||||
|
}, {
|
||||||
|
map[string]uint64{"v": 4294967296},
|
||||||
|
"v: 4294967296\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": "10"},
|
||||||
|
"v: \"10\"\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": 0.1},
|
||||||
|
"v: 0.1\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": float64(0.1)},
|
||||||
|
"v: 0.1\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": -0.1},
|
||||||
|
"v: -0.1\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": math.Inf(+1)},
|
||||||
|
"v: .inf\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": math.Inf(-1)},
|
||||||
|
"v: -.inf\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": math.NaN()},
|
||||||
|
"v: .nan\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": nil},
|
||||||
|
"v: null\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"v": ""},
|
||||||
|
"v: \"\"\n",
|
||||||
|
}, {
|
||||||
|
map[string][]string{"v": []string{"A", "B"}},
|
||||||
|
"v:\n- A\n- B\n",
|
||||||
|
}, {
|
||||||
|
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||||
|
"v:\n- A\n- |-\n B\n C\n",
|
||||||
|
}, {
|
||||||
|
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
|
||||||
|
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||||
|
"a:\n b: c\n",
|
||||||
|
}, {
|
||||||
|
map[string]interface{}{"a": "-"},
|
||||||
|
"a: '-'\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Simple values.
|
||||||
|
{
|
||||||
|
&marshalIntTest,
|
||||||
|
"123\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Structures
|
||||||
|
{
|
||||||
|
&struct{ Hello string }{"world"},
|
||||||
|
"hello: world\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A struct {
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
}{struct{ B string }{"c"}},
|
||||||
|
"a:\n b: c\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A *struct {
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
}{&struct{ B string }{"c"}},
|
||||||
|
"a:\n b: c\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A *struct {
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
}{},
|
||||||
|
"a: null\n",
|
||||||
|
}, {
|
||||||
|
&struct{ A int }{1},
|
||||||
|
"a: 1\n",
|
||||||
|
}, {
|
||||||
|
&struct{ A []int }{[]int{1, 2}},
|
||||||
|
"a:\n- 1\n- 2\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
B int "a"
|
||||||
|
}{1},
|
||||||
|
"a: 1\n",
|
||||||
|
}, {
|
||||||
|
&struct{ A bool }{true},
|
||||||
|
"a: true\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Conditional flag
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
A int "a,omitempty"
|
||||||
|
B int "b,omitempty"
|
||||||
|
}{1, 0},
|
||||||
|
"a: 1\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A int "a,omitempty"
|
||||||
|
B int "b,omitempty"
|
||||||
|
}{0, 0},
|
||||||
|
"{}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A *struct{ X, y int } "a,omitempty,flow"
|
||||||
|
}{&struct{ X, y int }{1, 2}},
|
||||||
|
"a: {x: 1}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A *struct{ X, y int } "a,omitempty,flow"
|
||||||
|
}{nil},
|
||||||
|
"{}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A *struct{ X, y int } "a,omitempty,flow"
|
||||||
|
}{&struct{ X, y int }{}},
|
||||||
|
"a: {x: 0}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A struct{ X, y int } "a,omitempty,flow"
|
||||||
|
}{struct{ X, y int }{1, 2}},
|
||||||
|
"a: {x: 1}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A struct{ X, y int } "a,omitempty,flow"
|
||||||
|
}{struct{ X, y int }{0, 1}},
|
||||||
|
"{}\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Flow flag
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
A []int "a,flow"
|
||||||
|
}{[]int{1, 2}},
|
||||||
|
"a: [1, 2]\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A map[string]string "a,flow"
|
||||||
|
}{map[string]string{"b": "c", "d": "e"}},
|
||||||
|
"a: {b: c, d: e}\n",
|
||||||
|
}, {
|
||||||
|
&struct {
|
||||||
|
A struct {
|
||||||
|
B, D string
|
||||||
|
} "a,flow"
|
||||||
|
}{struct{ B, D string }{"c", "e"}},
|
||||||
|
"a: {b: c, d: e}\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Unexported field
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
u int
|
||||||
|
A int
|
||||||
|
}{0, 1},
|
||||||
|
"a: 1\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Ignored field
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
B int "-"
|
||||||
|
}{1, 2},
|
||||||
|
"a: 1\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Struct inlining
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
C inlineB `yaml:",inline"`
|
||||||
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Map inlining
|
||||||
|
{
|
||||||
|
&struct {
|
||||||
|
A int
|
||||||
|
C map[string]int `yaml:",inline"`
|
||||||
|
}{1, map[string]int{"b": 2, "c": 3}},
|
||||||
|
"a: 1\nb: 2\nc: 3\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Duration
|
||||||
|
{
|
||||||
|
map[string]time.Duration{"a": 3 * time.Second},
|
||||||
|
"a: 3s\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #24: bug in map merging logic.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "<foo>"},
|
||||||
|
"a: <foo>\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
|
||||||
|
// with old YAML 1.1 parsers.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "1:1"},
|
||||||
|
"a: \"1:1\"\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Binary data.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "\x00"},
|
||||||
|
"a: \"\\0\"\n",
|
||||||
|
}, {
|
||||||
|
map[string]string{"a": "\x80\x81\x82"},
|
||||||
|
"a: !!binary gIGC\n",
|
||||||
|
}, {
|
||||||
|
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||||
|
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Ordered maps.
|
||||||
|
{
|
||||||
|
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||||
|
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Encode unicode as utf-8 rather than in escaped form.
|
||||||
|
{
|
||||||
|
map[string]string{"a": "你好"},
|
||||||
|
"a: 你好\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Support encoding.TextMarshaler.
|
||||||
|
{
|
||||||
|
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||||
|
"a: 1.2.3.4\n",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||||
|
"a: 2015-02-24T18:19:39Z\n",
|
||||||
|
},
|
||||||
|
|
||||||
|
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
|
||||||
|
{
|
||||||
|
map[string]string{"a": "b: c"},
|
||||||
|
"a: 'b: c'\n",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMarshal(c *C) {
|
||||||
|
defer os.Setenv("TZ", os.Getenv("TZ"))
|
||||||
|
os.Setenv("TZ", "UTC")
|
||||||
|
for _, item := range marshalTests {
|
||||||
|
data, err := yaml.Marshal(item.value)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(string(data), Equals, item.data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var marshalErrorTests = []struct {
|
||||||
|
value interface{}
|
||||||
|
error string
|
||||||
|
panic string
|
||||||
|
}{{
|
||||||
|
value: &struct {
|
||||||
|
B int
|
||||||
|
inlineB ",inline"
|
||||||
|
}{1, inlineB{2, inlineC{3}}},
|
||||||
|
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||||
|
}, {
|
||||||
|
value: &struct {
|
||||||
|
A int
|
||||||
|
B map[string]int ",inline"
|
||||||
|
}{1, map[string]int{"a": 2}},
|
||||||
|
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
|
||||||
|
}}
|
||||||
|
|
||||||
|
func (s *S) TestMarshalErrors(c *C) {
|
||||||
|
for _, item := range marshalErrorTests {
|
||||||
|
if item.panic != "" {
|
||||||
|
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
|
||||||
|
} else {
|
||||||
|
_, err := yaml.Marshal(item.value)
|
||||||
|
c.Assert(err, ErrorMatches, item.error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMarshalTypeCache(c *C) {
|
||||||
|
var data []byte
|
||||||
|
var err error
|
||||||
|
func() {
|
||||||
|
type T struct{ A int }
|
||||||
|
data, err = yaml.Marshal(&T{})
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
}()
|
||||||
|
func() {
|
||||||
|
type T struct{ B int }
|
||||||
|
data, err = yaml.Marshal(&T{})
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
}()
|
||||||
|
c.Assert(string(data), Equals, "b: 0\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
var marshalerTests = []struct {
|
||||||
|
data string
|
||||||
|
value interface{}
|
||||||
|
}{
|
||||||
|
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
|
||||||
|
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
|
||||||
|
{"_: 10\n", 10},
|
||||||
|
{"_: null\n", nil},
|
||||||
|
{"_: BAR!\n", "BAR!"},
|
||||||
|
}
|
||||||
|
|
||||||
|
type marshalerType struct {
|
||||||
|
value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o marshalerType) MarshalText() ([]byte, error) {
|
||||||
|
panic("MarshalText called on type with MarshalYAML")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o marshalerType) MarshalYAML() (interface{}, error) {
|
||||||
|
return o.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type marshalerValue struct {
|
||||||
|
Field marshalerType "_"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMarshaler(c *C) {
|
||||||
|
for _, item := range marshalerTests {
|
||||||
|
obj := &marshalerValue{}
|
||||||
|
obj.Field.value = item.value
|
||||||
|
data, err := yaml.Marshal(obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(string(data), Equals, string(item.data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMarshalerWholeDocument(c *C) {
|
||||||
|
obj := &marshalerType{}
|
||||||
|
obj.value = map[string]string{"hello": "world!"}
|
||||||
|
data, err := yaml.Marshal(obj)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
c.Assert(string(data), Equals, "hello: world!\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
type failingMarshaler struct{}
|
||||||
|
|
||||||
|
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
|
||||||
|
return nil, failingErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestMarshalerError(c *C) {
|
||||||
|
_, err := yaml.Marshal(&failingMarshaler{})
|
||||||
|
c.Assert(err, Equals, failingErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S) TestSortedOutput(c *C) {
|
||||||
|
order := []interface{}{
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
1,
|
||||||
|
uint(1),
|
||||||
|
1.0,
|
||||||
|
1.1,
|
||||||
|
1.2,
|
||||||
|
2,
|
||||||
|
uint(2),
|
||||||
|
2.0,
|
||||||
|
2.1,
|
||||||
|
"",
|
||||||
|
".1",
|
||||||
|
".2",
|
||||||
|
".a",
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"a!10",
|
||||||
|
"a/2",
|
||||||
|
"a/10",
|
||||||
|
"a~10",
|
||||||
|
"ab/1",
|
||||||
|
"b/1",
|
||||||
|
"b/01",
|
||||||
|
"b/2",
|
||||||
|
"b/02",
|
||||||
|
"b/3",
|
||||||
|
"b/03",
|
||||||
|
"b1",
|
||||||
|
"b01",
|
||||||
|
"b3",
|
||||||
|
"c2.10",
|
||||||
|
"c10.2",
|
||||||
|
"d1",
|
||||||
|
"d12",
|
||||||
|
"d12a",
|
||||||
|
}
|
||||||
|
m := make(map[interface{}]int)
|
||||||
|
for _, k := range order {
|
||||||
|
m[k] = 1
|
||||||
|
}
|
||||||
|
data, err := yaml.Marshal(m)
|
||||||
|
c.Assert(err, IsNil)
|
||||||
|
out := "\n" + string(data)
|
||||||
|
last := 0
|
||||||
|
for i, k := range order {
|
||||||
|
repr := fmt.Sprint(k)
|
||||||
|
if s, ok := k.(string); ok {
|
||||||
|
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
|
||||||
|
repr = `"` + repr + `"`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
index := strings.Index(out, "\n"+repr+":")
|
||||||
|
if index == -1 {
|
||||||
|
c.Fatalf("%#v is not in the output: %#v", k, out)
|
||||||
|
}
|
||||||
|
if index < last {
|
||||||
|
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
|
||||||
|
}
|
||||||
|
last = index
|
||||||
|
}
|
||||||
|
}
|
1096
Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
generated
vendored
Normal file
1096
Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
391
Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
generated
vendored
Normal file
391
Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
generated
vendored
Normal file
|
@ -0,0 +1,391 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Set the reader error and return 0.
|
||||||
|
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
|
||||||
|
parser.error = yaml_READER_ERROR
|
||||||
|
parser.problem = problem
|
||||||
|
parser.problem_offset = offset
|
||||||
|
parser.problem_value = value
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Byte order marks.
|
||||||
|
const (
|
||||||
|
bom_UTF8 = "\xef\xbb\xbf"
|
||||||
|
bom_UTF16LE = "\xff\xfe"
|
||||||
|
bom_UTF16BE = "\xfe\xff"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
||||||
|
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
||||||
|
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
||||||
|
// Ensure that we had enough bytes in the raw buffer.
|
||||||
|
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
||||||
|
if !yaml_parser_update_raw_buffer(parser) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the encoding.
|
||||||
|
buf := parser.raw_buffer
|
||||||
|
pos := parser.raw_buffer_pos
|
||||||
|
avail := len(buf) - pos
|
||||||
|
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
|
||||||
|
parser.encoding = yaml_UTF16LE_ENCODING
|
||||||
|
parser.raw_buffer_pos += 2
|
||||||
|
parser.offset += 2
|
||||||
|
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
|
||||||
|
parser.encoding = yaml_UTF16BE_ENCODING
|
||||||
|
parser.raw_buffer_pos += 2
|
||||||
|
parser.offset += 2
|
||||||
|
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
|
||||||
|
parser.encoding = yaml_UTF8_ENCODING
|
||||||
|
parser.raw_buffer_pos += 3
|
||||||
|
parser.offset += 3
|
||||||
|
} else {
|
||||||
|
parser.encoding = yaml_UTF8_ENCODING
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the raw buffer.
|
||||||
|
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
||||||
|
size_read := 0
|
||||||
|
|
||||||
|
// Return if the raw buffer is full.
|
||||||
|
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return on EOF.
|
||||||
|
if parser.eof {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move the remaining bytes in the raw buffer to the beginning.
|
||||||
|
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
||||||
|
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
||||||
|
}
|
||||||
|
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
||||||
|
parser.raw_buffer_pos = 0
|
||||||
|
|
||||||
|
// Call the read handler to fill the buffer.
|
||||||
|
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
||||||
|
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
||||||
|
if err == io.EOF {
|
||||||
|
parser.eof = true
|
||||||
|
} else if err != nil {
|
||||||
|
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the buffer contains at least `length` characters.
|
||||||
|
// Return true on success, false on failure.
|
||||||
|
//
|
||||||
|
// The length is supposed to be significantly less that the buffer size.
|
||||||
|
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
||||||
|
if parser.read_handler == nil {
|
||||||
|
panic("read handler must be set")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
||||||
|
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return if the buffer contains enough characters.
|
||||||
|
if parser.unread >= length {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the input encoding if it is not known yet.
|
||||||
|
if parser.encoding == yaml_ANY_ENCODING {
|
||||||
|
if !yaml_parser_determine_encoding(parser) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move the unread characters to the beginning of the buffer.
|
||||||
|
buffer_len := len(parser.buffer)
|
||||||
|
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
||||||
|
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
||||||
|
buffer_len -= parser.buffer_pos
|
||||||
|
parser.buffer_pos = 0
|
||||||
|
} else if parser.buffer_pos == buffer_len {
|
||||||
|
buffer_len = 0
|
||||||
|
parser.buffer_pos = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open the whole buffer for writing, and cut it before returning.
|
||||||
|
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
||||||
|
|
||||||
|
// Fill the buffer until it has enough characters.
|
||||||
|
first := true
|
||||||
|
for parser.unread < length {
|
||||||
|
|
||||||
|
// Fill the raw buffer if necessary.
|
||||||
|
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||||
|
if !yaml_parser_update_raw_buffer(parser) {
|
||||||
|
parser.buffer = parser.buffer[:buffer_len]
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
first = false
|
||||||
|
|
||||||
|
// Decode the raw buffer.
|
||||||
|
inner:
|
||||||
|
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
||||||
|
var value rune
|
||||||
|
var width int
|
||||||
|
|
||||||
|
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
||||||
|
|
||||||
|
// Decode the next character.
|
||||||
|
switch parser.encoding {
|
||||||
|
case yaml_UTF8_ENCODING:
|
||||||
|
// Decode a UTF-8 character. Check RFC 3629
|
||||||
|
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
||||||
|
//
|
||||||
|
// The following table (taken from the RFC) is used for
|
||||||
|
// decoding.
|
||||||
|
//
|
||||||
|
// Char. number range | UTF-8 octet sequence
|
||||||
|
// (hexadecimal) | (binary)
|
||||||
|
// --------------------+------------------------------------
|
||||||
|
// 0000 0000-0000 007F | 0xxxxxxx
|
||||||
|
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
||||||
|
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
||||||
|
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||||
|
//
|
||||||
|
// Additionally, the characters in the range 0xD800-0xDFFF
|
||||||
|
// are prohibited as they are reserved for use with UTF-16
|
||||||
|
// surrogate pairs.
|
||||||
|
|
||||||
|
// Determine the length of the UTF-8 sequence.
|
||||||
|
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
||||||
|
switch {
|
||||||
|
case octet&0x80 == 0x00:
|
||||||
|
width = 1
|
||||||
|
case octet&0xE0 == 0xC0:
|
||||||
|
width = 2
|
||||||
|
case octet&0xF0 == 0xE0:
|
||||||
|
width = 3
|
||||||
|
case octet&0xF8 == 0xF0:
|
||||||
|
width = 4
|
||||||
|
default:
|
||||||
|
// The leading octet is invalid.
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"invalid leading UTF-8 octet",
|
||||||
|
parser.offset, int(octet))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the raw buffer contains an incomplete character.
|
||||||
|
if width > raw_unread {
|
||||||
|
if parser.eof {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"incomplete UTF-8 octet sequence",
|
||||||
|
parser.offset, -1)
|
||||||
|
}
|
||||||
|
break inner
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the leading octet.
|
||||||
|
switch {
|
||||||
|
case octet&0x80 == 0x00:
|
||||||
|
value = rune(octet & 0x7F)
|
||||||
|
case octet&0xE0 == 0xC0:
|
||||||
|
value = rune(octet & 0x1F)
|
||||||
|
case octet&0xF0 == 0xE0:
|
||||||
|
value = rune(octet & 0x0F)
|
||||||
|
case octet&0xF8 == 0xF0:
|
||||||
|
value = rune(octet & 0x07)
|
||||||
|
default:
|
||||||
|
value = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check and decode the trailing octets.
|
||||||
|
for k := 1; k < width; k++ {
|
||||||
|
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
||||||
|
|
||||||
|
// Check if the octet is valid.
|
||||||
|
if (octet & 0xC0) != 0x80 {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"invalid trailing UTF-8 octet",
|
||||||
|
parser.offset+k, int(octet))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the octet.
|
||||||
|
value = (value << 6) + rune(octet&0x3F)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the length of the sequence against the value.
|
||||||
|
switch {
|
||||||
|
case width == 1:
|
||||||
|
case width == 2 && value >= 0x80:
|
||||||
|
case width == 3 && value >= 0x800:
|
||||||
|
case width == 4 && value >= 0x10000:
|
||||||
|
default:
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"invalid length of a UTF-8 sequence",
|
||||||
|
parser.offset, -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the range of the value.
|
||||||
|
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"invalid Unicode character",
|
||||||
|
parser.offset, int(value))
|
||||||
|
}
|
||||||
|
|
||||||
|
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
||||||
|
var low, high int
|
||||||
|
if parser.encoding == yaml_UTF16LE_ENCODING {
|
||||||
|
low, high = 0, 1
|
||||||
|
} else {
|
||||||
|
high, low = 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UTF-16 encoding is not as simple as one might
|
||||||
|
// naively think. Check RFC 2781
|
||||||
|
// (http://www.ietf.org/rfc/rfc2781.txt).
|
||||||
|
//
|
||||||
|
// Normally, two subsequent bytes describe a Unicode
|
||||||
|
// character. However a special technique (called a
|
||||||
|
// surrogate pair) is used for specifying character
|
||||||
|
// values larger than 0xFFFF.
|
||||||
|
//
|
||||||
|
// A surrogate pair consists of two pseudo-characters:
|
||||||
|
// high surrogate area (0xD800-0xDBFF)
|
||||||
|
// low surrogate area (0xDC00-0xDFFF)
|
||||||
|
//
|
||||||
|
// The following formulas are used for decoding
|
||||||
|
// and encoding characters using surrogate pairs:
|
||||||
|
//
|
||||||
|
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
||||||
|
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
||||||
|
// W1 = 110110yyyyyyyyyy
|
||||||
|
// W2 = 110111xxxxxxxxxx
|
||||||
|
//
|
||||||
|
// where U is the character value, W1 is the high surrogate
|
||||||
|
// area, W2 is the low surrogate area.
|
||||||
|
|
||||||
|
// Check for incomplete UTF-16 character.
|
||||||
|
if raw_unread < 2 {
|
||||||
|
if parser.eof {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"incomplete UTF-16 character",
|
||||||
|
parser.offset, -1)
|
||||||
|
}
|
||||||
|
break inner
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the character.
|
||||||
|
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
||||||
|
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
||||||
|
|
||||||
|
// Check for unexpected low surrogate area.
|
||||||
|
if value&0xFC00 == 0xDC00 {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"unexpected low surrogate area",
|
||||||
|
parser.offset, int(value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for a high surrogate area.
|
||||||
|
if value&0xFC00 == 0xD800 {
|
||||||
|
width = 4
|
||||||
|
|
||||||
|
// Check for incomplete surrogate pair.
|
||||||
|
if raw_unread < 4 {
|
||||||
|
if parser.eof {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"incomplete UTF-16 surrogate pair",
|
||||||
|
parser.offset, -1)
|
||||||
|
}
|
||||||
|
break inner
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the next character.
|
||||||
|
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
||||||
|
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
||||||
|
|
||||||
|
// Check for a low surrogate area.
|
||||||
|
if value2&0xFC00 != 0xDC00 {
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"expected low surrogate area",
|
||||||
|
parser.offset+2, int(value2))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate the value of the surrogate pair.
|
||||||
|
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
||||||
|
} else {
|
||||||
|
width = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("impossible")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character is in the allowed range:
|
||||||
|
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
||||||
|
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
||||||
|
// | [#x10000-#x10FFFF] (32 bit)
|
||||||
|
switch {
|
||||||
|
case value == 0x09:
|
||||||
|
case value == 0x0A:
|
||||||
|
case value == 0x0D:
|
||||||
|
case value >= 0x20 && value <= 0x7E:
|
||||||
|
case value == 0x85:
|
||||||
|
case value >= 0xA0 && value <= 0xD7FF:
|
||||||
|
case value >= 0xE000 && value <= 0xFFFD:
|
||||||
|
case value >= 0x10000 && value <= 0x10FFFF:
|
||||||
|
default:
|
||||||
|
return yaml_parser_set_reader_error(parser,
|
||||||
|
"control characters are not allowed",
|
||||||
|
parser.offset, int(value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move the raw pointers.
|
||||||
|
parser.raw_buffer_pos += width
|
||||||
|
parser.offset += width
|
||||||
|
|
||||||
|
// Finally put the character into the buffer.
|
||||||
|
if value <= 0x7F {
|
||||||
|
// 0000 0000-0000 007F . 0xxxxxxx
|
||||||
|
parser.buffer[buffer_len+0] = byte(value)
|
||||||
|
} else if value <= 0x7FF {
|
||||||
|
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
||||||
|
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
||||||
|
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
||||||
|
} else if value <= 0xFFFF {
|
||||||
|
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
||||||
|
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
||||||
|
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||||
|
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
||||||
|
} else {
|
||||||
|
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||||
|
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
||||||
|
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
||||||
|
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||||
|
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
||||||
|
}
|
||||||
|
buffer_len += width
|
||||||
|
|
||||||
|
parser.unread++
|
||||||
|
}
|
||||||
|
|
||||||
|
// On EOF, put NUL into the buffer and return.
|
||||||
|
if parser.eof {
|
||||||
|
parser.buffer[buffer_len] = 0
|
||||||
|
buffer_len++
|
||||||
|
parser.unread++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
parser.buffer = parser.buffer[:buffer_len]
|
||||||
|
return true
|
||||||
|
}
|
203
Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
generated
vendored
Normal file
203
Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
generated
vendored
Normal file
|
@ -0,0 +1,203 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
type resolveMapItem struct {
|
||||||
|
value interface{}
|
||||||
|
tag string
|
||||||
|
}
|
||||||
|
|
||||||
|
var resolveTable = make([]byte, 256)
|
||||||
|
var resolveMap = make(map[string]resolveMapItem)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
t := resolveTable
|
||||||
|
t[int('+')] = 'S' // Sign
|
||||||
|
t[int('-')] = 'S'
|
||||||
|
for _, c := range "0123456789" {
|
||||||
|
t[int(c)] = 'D' // Digit
|
||||||
|
}
|
||||||
|
for _, c := range "yYnNtTfFoO~" {
|
||||||
|
t[int(c)] = 'M' // In map
|
||||||
|
}
|
||||||
|
t[int('.')] = '.' // Float (potentially in map)
|
||||||
|
|
||||||
|
var resolveMapList = []struct {
|
||||||
|
v interface{}
|
||||||
|
tag string
|
||||||
|
l []string
|
||||||
|
}{
|
||||||
|
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
||||||
|
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
||||||
|
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
||||||
|
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
||||||
|
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
||||||
|
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
||||||
|
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
||||||
|
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
|
||||||
|
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
||||||
|
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||||
|
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||||
|
{"<<", yaml_MERGE_TAG, []string{"<<"}},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := resolveMap
|
||||||
|
for _, item := range resolveMapList {
|
||||||
|
for _, s := range item.l {
|
||||||
|
m[s] = resolveMapItem{item.v, item.tag}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const longTagPrefix = "tag:yaml.org,2002:"
|
||||||
|
|
||||||
|
func shortTag(tag string) string {
|
||||||
|
// TODO This can easily be made faster and produce less garbage.
|
||||||
|
if strings.HasPrefix(tag, longTagPrefix) {
|
||||||
|
return "!!" + tag[len(longTagPrefix):]
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
|
||||||
|
func longTag(tag string) string {
|
||||||
|
if strings.HasPrefix(tag, "!!") {
|
||||||
|
return longTagPrefix + tag[2:]
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolvableTag(tag string) bool {
|
||||||
|
switch tag {
|
||||||
|
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolve(tag string, in string) (rtag string, out interface{}) {
|
||||||
|
if !resolvableTag(tag) {
|
||||||
|
return tag, in
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
switch tag {
|
||||||
|
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Any data is accepted as a !!str or !!binary.
|
||||||
|
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||||
|
hint := byte('N')
|
||||||
|
if in != "" {
|
||||||
|
hint = resolveTable[in[0]]
|
||||||
|
}
|
||||||
|
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
||||||
|
// Handle things we can lookup in a map.
|
||||||
|
if item, ok := resolveMap[in]; ok {
|
||||||
|
return item.tag, item.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
||||||
|
// are purposefully unsupported here. They're still quoted on
|
||||||
|
// the way out for compatibility with other parser, though.
|
||||||
|
|
||||||
|
switch hint {
|
||||||
|
case 'M':
|
||||||
|
// We've already checked the map above.
|
||||||
|
|
||||||
|
case '.':
|
||||||
|
// Not in the map, so maybe a normal float.
|
||||||
|
floatv, err := strconv.ParseFloat(in, 64)
|
||||||
|
if err == nil {
|
||||||
|
return yaml_FLOAT_TAG, floatv
|
||||||
|
}
|
||||||
|
|
||||||
|
case 'D', 'S':
|
||||||
|
// Int, float, or timestamp.
|
||||||
|
plain := strings.Replace(in, "_", "", -1)
|
||||||
|
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||||
|
if err == nil {
|
||||||
|
if intv == int64(int(intv)) {
|
||||||
|
return yaml_INT_TAG, int(intv)
|
||||||
|
} else {
|
||||||
|
return yaml_INT_TAG, intv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
uintv, err := strconv.ParseUint(plain, 0, 64)
|
||||||
|
if err == nil {
|
||||||
|
return yaml_INT_TAG, uintv
|
||||||
|
}
|
||||||
|
floatv, err := strconv.ParseFloat(plain, 64)
|
||||||
|
if err == nil {
|
||||||
|
return yaml_FLOAT_TAG, floatv
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(plain, "0b") {
|
||||||
|
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||||
|
if err == nil {
|
||||||
|
if intv == int64(int(intv)) {
|
||||||
|
return yaml_INT_TAG, int(intv)
|
||||||
|
} else {
|
||||||
|
return yaml_INT_TAG, intv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
||||||
|
if err == nil {
|
||||||
|
return yaml_INT_TAG, uintv
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(plain, "-0b") {
|
||||||
|
intv, err := strconv.ParseInt(plain[3:], 2, 64)
|
||||||
|
if err == nil {
|
||||||
|
if intv == int64(int(intv)) {
|
||||||
|
return yaml_INT_TAG, -int(intv)
|
||||||
|
} else {
|
||||||
|
return yaml_INT_TAG, -intv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// XXX Handle timestamps here.
|
||||||
|
|
||||||
|
default:
|
||||||
|
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tag == yaml_BINARY_TAG {
|
||||||
|
return yaml_BINARY_TAG, in
|
||||||
|
}
|
||||||
|
if utf8.ValidString(in) {
|
||||||
|
return yaml_STR_TAG, in
|
||||||
|
}
|
||||||
|
return yaml_BINARY_TAG, encodeBase64(in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
||||||
|
// as appropriate for the resulting length.
|
||||||
|
func encodeBase64(s string) string {
|
||||||
|
const lineLen = 70
|
||||||
|
encLen := base64.StdEncoding.EncodedLen(len(s))
|
||||||
|
lines := encLen/lineLen + 1
|
||||||
|
buf := make([]byte, encLen*2+lines)
|
||||||
|
in := buf[0:encLen]
|
||||||
|
out := buf[encLen:]
|
||||||
|
base64.StdEncoding.Encode(in, []byte(s))
|
||||||
|
k := 0
|
||||||
|
for i := 0; i < len(in); i += lineLen {
|
||||||
|
j := i + lineLen
|
||||||
|
if j > len(in) {
|
||||||
|
j = len(in)
|
||||||
|
}
|
||||||
|
k += copy(out[k:], in[i:j])
|
||||||
|
if lines > 1 {
|
||||||
|
out[k] = '\n'
|
||||||
|
k++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return string(out[:k])
|
||||||
|
}
|
2710
Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
generated
vendored
Normal file
2710
Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
104
Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
generated
vendored
Normal file
104
Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
generated
vendored
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
type keyList []reflect.Value
|
||||||
|
|
||||||
|
func (l keyList) Len() int { return len(l) }
|
||||||
|
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||||
|
func (l keyList) Less(i, j int) bool {
|
||||||
|
a := l[i]
|
||||||
|
b := l[j]
|
||||||
|
ak := a.Kind()
|
||||||
|
bk := b.Kind()
|
||||||
|
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
|
||||||
|
a = a.Elem()
|
||||||
|
ak = a.Kind()
|
||||||
|
}
|
||||||
|
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
|
||||||
|
b = b.Elem()
|
||||||
|
bk = b.Kind()
|
||||||
|
}
|
||||||
|
af, aok := keyFloat(a)
|
||||||
|
bf, bok := keyFloat(b)
|
||||||
|
if aok && bok {
|
||||||
|
if af != bf {
|
||||||
|
return af < bf
|
||||||
|
}
|
||||||
|
if ak != bk {
|
||||||
|
return ak < bk
|
||||||
|
}
|
||||||
|
return numLess(a, b)
|
||||||
|
}
|
||||||
|
if ak != reflect.String || bk != reflect.String {
|
||||||
|
return ak < bk
|
||||||
|
}
|
||||||
|
ar, br := []rune(a.String()), []rune(b.String())
|
||||||
|
for i := 0; i < len(ar) && i < len(br); i++ {
|
||||||
|
if ar[i] == br[i] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
al := unicode.IsLetter(ar[i])
|
||||||
|
bl := unicode.IsLetter(br[i])
|
||||||
|
if al && bl {
|
||||||
|
return ar[i] < br[i]
|
||||||
|
}
|
||||||
|
if al || bl {
|
||||||
|
return bl
|
||||||
|
}
|
||||||
|
var ai, bi int
|
||||||
|
var an, bn int64
|
||||||
|
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
|
||||||
|
an = an*10 + int64(ar[ai]-'0')
|
||||||
|
}
|
||||||
|
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
|
||||||
|
bn = bn*10 + int64(br[bi]-'0')
|
||||||
|
}
|
||||||
|
if an != bn {
|
||||||
|
return an < bn
|
||||||
|
}
|
||||||
|
if ai != bi {
|
||||||
|
return ai < bi
|
||||||
|
}
|
||||||
|
return ar[i] < br[i]
|
||||||
|
}
|
||||||
|
return len(ar) < len(br)
|
||||||
|
}
|
||||||
|
|
||||||
|
// keyFloat returns a float value for v if it is a number/bool
|
||||||
|
// and whether it is a number/bool or not.
|
||||||
|
func keyFloat(v reflect.Value) (f float64, ok bool) {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return float64(v.Int()), true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return v.Float(), true
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return float64(v.Uint()), true
|
||||||
|
case reflect.Bool:
|
||||||
|
if v.Bool() {
|
||||||
|
return 1, true
|
||||||
|
}
|
||||||
|
return 0, true
|
||||||
|
}
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// numLess returns whether a < b.
|
||||||
|
// a and b must necessarily have the same kind.
|
||||||
|
func numLess(a, b reflect.Value) bool {
|
||||||
|
switch a.Kind() {
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return a.Int() < b.Int()
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return a.Float() < b.Float()
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return a.Uint() < b.Uint()
|
||||||
|
case reflect.Bool:
|
||||||
|
return !a.Bool() && b.Bool()
|
||||||
|
}
|
||||||
|
panic("not a number")
|
||||||
|
}
|
12
Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go
generated
vendored
Normal file
12
Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
package yaml_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
. "gopkg.in/check.v1"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test(t *testing.T) { TestingT(t) }
|
||||||
|
|
||||||
|
type S struct{}
|
||||||
|
|
||||||
|
var _ = Suite(&S{})
|
89
Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
generated
vendored
Normal file
89
Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
// Set the writer error and return false.
|
||||||
|
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
||||||
|
emitter.error = yaml_WRITER_ERROR
|
||||||
|
emitter.problem = problem
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush the output buffer.
|
||||||
|
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
||||||
|
if emitter.write_handler == nil {
|
||||||
|
panic("write handler not set")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the buffer is empty.
|
||||||
|
if emitter.buffer_pos == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the output encoding is UTF-8, we don't need to recode the buffer.
|
||||||
|
if emitter.encoding == yaml_UTF8_ENCODING {
|
||||||
|
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
|
||||||
|
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||||
|
}
|
||||||
|
emitter.buffer_pos = 0
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recode the buffer into the raw buffer.
|
||||||
|
var low, high int
|
||||||
|
if emitter.encoding == yaml_UTF16LE_ENCODING {
|
||||||
|
low, high = 0, 1
|
||||||
|
} else {
|
||||||
|
high, low = 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
pos := 0
|
||||||
|
for pos < emitter.buffer_pos {
|
||||||
|
// See the "reader.c" code for more details on UTF-8 encoding. Note
|
||||||
|
// that we assume that the buffer contains a valid UTF-8 sequence.
|
||||||
|
|
||||||
|
// Read the next UTF-8 character.
|
||||||
|
octet := emitter.buffer[pos]
|
||||||
|
|
||||||
|
var w int
|
||||||
|
var value rune
|
||||||
|
switch {
|
||||||
|
case octet&0x80 == 0x00:
|
||||||
|
w, value = 1, rune(octet&0x7F)
|
||||||
|
case octet&0xE0 == 0xC0:
|
||||||
|
w, value = 2, rune(octet&0x1F)
|
||||||
|
case octet&0xF0 == 0xE0:
|
||||||
|
w, value = 3, rune(octet&0x0F)
|
||||||
|
case octet&0xF8 == 0xF0:
|
||||||
|
w, value = 4, rune(octet&0x07)
|
||||||
|
}
|
||||||
|
for k := 1; k < w; k++ {
|
||||||
|
octet = emitter.buffer[pos+k]
|
||||||
|
value = (value << 6) + (rune(octet) & 0x3F)
|
||||||
|
}
|
||||||
|
pos += w
|
||||||
|
|
||||||
|
// Write the character.
|
||||||
|
if value < 0x10000 {
|
||||||
|
var b [2]byte
|
||||||
|
b[high] = byte(value >> 8)
|
||||||
|
b[low] = byte(value & 0xFF)
|
||||||
|
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
|
||||||
|
} else {
|
||||||
|
// Write the character using a surrogate pair (check "reader.c").
|
||||||
|
var b [4]byte
|
||||||
|
value -= 0x10000
|
||||||
|
b[high] = byte(0xD8 + (value >> 18))
|
||||||
|
b[low] = byte((value >> 10) & 0xFF)
|
||||||
|
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
|
||||||
|
b[low+2] = byte(value & 0xFF)
|
||||||
|
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the raw buffer.
|
||||||
|
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
|
||||||
|
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||||
|
}
|
||||||
|
emitter.buffer_pos = 0
|
||||||
|
emitter.raw_buffer = emitter.raw_buffer[:0]
|
||||||
|
return true
|
||||||
|
}
|
344
Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
generated
vendored
Normal file
344
Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
generated
vendored
Normal file
|
@ -0,0 +1,344 @@
|
||||||
|
// Package yaml implements YAML support for the Go language.
|
||||||
|
//
|
||||||
|
// Source code and other details for the project are available at GitHub:
|
||||||
|
//
|
||||||
|
// https://github.com/go-yaml/yaml
|
||||||
|
//
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MapSlice encodes and decodes as a YAML map.
|
||||||
|
// The order of keys is preserved when encoding and decoding.
|
||||||
|
type MapSlice []MapItem
|
||||||
|
|
||||||
|
// MapItem is an item in a MapSlice.
|
||||||
|
type MapItem struct {
|
||||||
|
Key, Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Unmarshaler interface may be implemented by types to customize their
|
||||||
|
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
|
||||||
|
// method receives a function that may be called to unmarshal the original
|
||||||
|
// YAML value into a field or variable. It is safe to call the unmarshal
|
||||||
|
// function parameter more than once if necessary.
|
||||||
|
type Unmarshaler interface {
|
||||||
|
UnmarshalYAML(unmarshal func(interface{}) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Marshaler interface may be implemented by types to customize their
|
||||||
|
// behavior when being marshaled into a YAML document. The returned value
|
||||||
|
// is marshaled in place of the original value implementing Marshaler.
|
||||||
|
//
|
||||||
|
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
||||||
|
// and returns with the provided error.
|
||||||
|
type Marshaler interface {
|
||||||
|
MarshalYAML() (interface{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal decodes the first document found within the in byte slice
|
||||||
|
// and assigns decoded values into the out value.
|
||||||
|
//
|
||||||
|
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
||||||
|
// values. If an internal pointer within a struct is not initialized,
|
||||||
|
// the yaml package will initialize it if necessary for unmarshalling
|
||||||
|
// the provided data. The out parameter must not be nil.
|
||||||
|
//
|
||||||
|
// The type of the decoded values should be compatible with the respective
|
||||||
|
// values in out. If one or more values cannot be decoded due to a type
|
||||||
|
// mismatches, decoding continues partially until the end of the YAML
|
||||||
|
// content, and a *yaml.TypeError is returned with details for all
|
||||||
|
// missed values.
|
||||||
|
//
|
||||||
|
// Struct fields are only unmarshalled if they are exported (have an
|
||||||
|
// upper case first letter), and are unmarshalled using the field name
|
||||||
|
// lowercased as the default key. Custom keys may be defined via the
|
||||||
|
// "yaml" name in the field tag: the content preceding the first comma
|
||||||
|
// is used as the key, and the following comma-separated options are
|
||||||
|
// used to tweak the marshalling process (see Marshal).
|
||||||
|
// Conflicting names result in a runtime error.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// type T struct {
|
||||||
|
// F int `yaml:"a,omitempty"`
|
||||||
|
// B int
|
||||||
|
// }
|
||||||
|
// var t T
|
||||||
|
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||||
|
//
|
||||||
|
// See the documentation of Marshal for the format of tags and a list of
|
||||||
|
// supported tag options.
|
||||||
|
//
|
||||||
|
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||||
|
defer handleErr(&err)
|
||||||
|
d := newDecoder()
|
||||||
|
p := newParser(in)
|
||||||
|
defer p.destroy()
|
||||||
|
node := p.parse()
|
||||||
|
if node != nil {
|
||||||
|
v := reflect.ValueOf(out)
|
||||||
|
if v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
d.unmarshal(node, v)
|
||||||
|
}
|
||||||
|
if len(d.terrors) > 0 {
|
||||||
|
return &TypeError{d.terrors}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal serializes the value provided into a YAML document. The structure
|
||||||
|
// of the generated document will reflect the structure of the value itself.
|
||||||
|
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||||
|
//
|
||||||
|
// Struct fields are only unmarshalled if they are exported (have an upper case
|
||||||
|
// first letter), and are unmarshalled using the field name lowercased as the
|
||||||
|
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||||
|
// tag: the content preceding the first comma is used as the key, and the
|
||||||
|
// following comma-separated options are used to tweak the marshalling process.
|
||||||
|
// Conflicting names result in a runtime error.
|
||||||
|
//
|
||||||
|
// The field tag format accepted is:
|
||||||
|
//
|
||||||
|
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||||
|
//
|
||||||
|
// The following flags are currently supported:
|
||||||
|
//
|
||||||
|
// omitempty Only include the field if it's not set to the zero
|
||||||
|
// value for the type or to empty slices or maps.
|
||||||
|
// Does not apply to zero valued structs.
|
||||||
|
//
|
||||||
|
// flow Marshal using a flow style (useful for structs,
|
||||||
|
// sequences and maps).
|
||||||
|
//
|
||||||
|
// inline Inline the field, which must be a struct or a map,
|
||||||
|
// causing all of its fields or keys to be processed as if
|
||||||
|
// they were part of the outer struct. For maps, keys must
|
||||||
|
// not conflict with the yaml keys of other struct fields.
|
||||||
|
//
|
||||||
|
// In addition, if the key is "-", the field is ignored.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// type T struct {
|
||||||
|
// F int "a,omitempty"
|
||||||
|
// B int
|
||||||
|
// }
|
||||||
|
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||||
|
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
|
||||||
|
//
|
||||||
|
func Marshal(in interface{}) (out []byte, err error) {
|
||||||
|
defer handleErr(&err)
|
||||||
|
e := newEncoder()
|
||||||
|
defer e.destroy()
|
||||||
|
e.marshal("", reflect.ValueOf(in))
|
||||||
|
e.finish()
|
||||||
|
out = e.out
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleErr(err *error) {
|
||||||
|
if v := recover(); v != nil {
|
||||||
|
if e, ok := v.(yamlError); ok {
|
||||||
|
*err = e.err
|
||||||
|
} else {
|
||||||
|
panic(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type yamlError struct {
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func fail(err error) {
|
||||||
|
panic(yamlError{err})
|
||||||
|
}
|
||||||
|
|
||||||
|
func failf(format string, args ...interface{}) {
|
||||||
|
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TypeError is returned by Unmarshal when one or more fields in
|
||||||
|
// the YAML document cannot be properly decoded into the requested
|
||||||
|
// types. When this error is returned, the value is still
|
||||||
|
// unmarshaled partially.
|
||||||
|
type TypeError struct {
|
||||||
|
Errors []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TypeError) Error() string {
|
||||||
|
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// Maintain a mapping of keys to structure field indexes
|
||||||
|
|
||||||
|
// The code in this section was copied from mgo/bson.
|
||||||
|
|
||||||
|
// structInfo holds details for the serialization of fields of
|
||||||
|
// a given struct.
|
||||||
|
type structInfo struct {
|
||||||
|
FieldsMap map[string]fieldInfo
|
||||||
|
FieldsList []fieldInfo
|
||||||
|
|
||||||
|
// InlineMap is the number of the field in the struct that
|
||||||
|
// contains an ,inline map, or -1 if there's none.
|
||||||
|
InlineMap int
|
||||||
|
}
|
||||||
|
|
||||||
|
type fieldInfo struct {
|
||||||
|
Key string
|
||||||
|
Num int
|
||||||
|
OmitEmpty bool
|
||||||
|
Flow bool
|
||||||
|
|
||||||
|
// Inline holds the field index if the field is part of an inlined struct.
|
||||||
|
Inline []int
|
||||||
|
}
|
||||||
|
|
||||||
|
var structMap = make(map[reflect.Type]*structInfo)
|
||||||
|
var fieldMapMutex sync.RWMutex
|
||||||
|
|
||||||
|
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||||
|
fieldMapMutex.RLock()
|
||||||
|
sinfo, found := structMap[st]
|
||||||
|
fieldMapMutex.RUnlock()
|
||||||
|
if found {
|
||||||
|
return sinfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
n := st.NumField()
|
||||||
|
fieldsMap := make(map[string]fieldInfo)
|
||||||
|
fieldsList := make([]fieldInfo, 0, n)
|
||||||
|
inlineMap := -1
|
||||||
|
for i := 0; i != n; i++ {
|
||||||
|
field := st.Field(i)
|
||||||
|
if field.PkgPath != "" {
|
||||||
|
continue // Private field
|
||||||
|
}
|
||||||
|
|
||||||
|
info := fieldInfo{Num: i}
|
||||||
|
|
||||||
|
tag := field.Tag.Get("yaml")
|
||||||
|
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||||
|
tag = string(field.Tag)
|
||||||
|
}
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
inline := false
|
||||||
|
fields := strings.Split(tag, ",")
|
||||||
|
if len(fields) > 1 {
|
||||||
|
for _, flag := range fields[1:] {
|
||||||
|
switch flag {
|
||||||
|
case "omitempty":
|
||||||
|
info.OmitEmpty = true
|
||||||
|
case "flow":
|
||||||
|
info.Flow = true
|
||||||
|
case "inline":
|
||||||
|
inline = true
|
||||||
|
default:
|
||||||
|
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tag = fields[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
if inline {
|
||||||
|
switch field.Type.Kind() {
|
||||||
|
case reflect.Map:
|
||||||
|
if inlineMap >= 0 {
|
||||||
|
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||||
|
}
|
||||||
|
if field.Type.Key() != reflect.TypeOf("") {
|
||||||
|
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
|
||||||
|
}
|
||||||
|
inlineMap = info.Num
|
||||||
|
case reflect.Struct:
|
||||||
|
sinfo, err := getStructInfo(field.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, finfo := range sinfo.FieldsList {
|
||||||
|
if _, found := fieldsMap[finfo.Key]; found {
|
||||||
|
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||||
|
return nil, errors.New(msg)
|
||||||
|
}
|
||||||
|
if finfo.Inline == nil {
|
||||||
|
finfo.Inline = []int{i, finfo.Num}
|
||||||
|
} else {
|
||||||
|
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||||
|
}
|
||||||
|
fieldsMap[finfo.Key] = finfo
|
||||||
|
fieldsList = append(fieldsList, finfo)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
//return nil, errors.New("Option ,inline needs a struct value or map field")
|
||||||
|
return nil, errors.New("Option ,inline needs a struct value field")
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if tag != "" {
|
||||||
|
info.Key = tag
|
||||||
|
} else {
|
||||||
|
info.Key = strings.ToLower(field.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, found = fieldsMap[info.Key]; found {
|
||||||
|
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
|
||||||
|
return nil, errors.New(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldsList = append(fieldsList, info)
|
||||||
|
fieldsMap[info.Key] = info
|
||||||
|
}
|
||||||
|
|
||||||
|
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
|
||||||
|
|
||||||
|
fieldMapMutex.Lock()
|
||||||
|
structMap[st] = sinfo
|
||||||
|
fieldMapMutex.Unlock()
|
||||||
|
return sinfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isZero(v reflect.Value) bool {
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.String:
|
||||||
|
return len(v.String()) == 0
|
||||||
|
case reflect.Interface, reflect.Ptr:
|
||||||
|
return v.IsNil()
|
||||||
|
case reflect.Slice:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Map:
|
||||||
|
return v.Len() == 0
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return v.Int() == 0
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return v.Uint() == 0
|
||||||
|
case reflect.Bool:
|
||||||
|
return !v.Bool()
|
||||||
|
case reflect.Struct:
|
||||||
|
vt := v.Type()
|
||||||
|
for i := v.NumField()-1; i >= 0; i-- {
|
||||||
|
if vt.Field(i).PkgPath != "" {
|
||||||
|
continue // Private field
|
||||||
|
}
|
||||||
|
if !isZero(v.Field(i)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
716
Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
generated
vendored
Normal file
716
Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
generated
vendored
Normal file
|
@ -0,0 +1,716 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The version directive data.
|
||||||
|
type yaml_version_directive_t struct {
|
||||||
|
major int8 // The major version number.
|
||||||
|
minor int8 // The minor version number.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The tag directive data.
|
||||||
|
type yaml_tag_directive_t struct {
|
||||||
|
handle []byte // The tag handle.
|
||||||
|
prefix []byte // The tag prefix.
|
||||||
|
}
|
||||||
|
|
||||||
|
type yaml_encoding_t int
|
||||||
|
|
||||||
|
// The stream encoding.
|
||||||
|
const (
|
||||||
|
// Let the parser choose the encoding.
|
||||||
|
yaml_ANY_ENCODING yaml_encoding_t = iota
|
||||||
|
|
||||||
|
yaml_UTF8_ENCODING // The default UTF-8 encoding.
|
||||||
|
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
|
||||||
|
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
|
||||||
|
)
|
||||||
|
|
||||||
|
type yaml_break_t int
|
||||||
|
|
||||||
|
// Line break types.
|
||||||
|
const (
|
||||||
|
// Let the parser choose the break type.
|
||||||
|
yaml_ANY_BREAK yaml_break_t = iota
|
||||||
|
|
||||||
|
yaml_CR_BREAK // Use CR for line breaks (Mac style).
|
||||||
|
yaml_LN_BREAK // Use LN for line breaks (Unix style).
|
||||||
|
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
|
||||||
|
)
|
||||||
|
|
||||||
|
type yaml_error_type_t int
|
||||||
|
|
||||||
|
// Many bad things could happen with the parser and emitter.
|
||||||
|
const (
|
||||||
|
// No error is produced.
|
||||||
|
yaml_NO_ERROR yaml_error_type_t = iota
|
||||||
|
|
||||||
|
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
|
||||||
|
yaml_READER_ERROR // Cannot read or decode the input stream.
|
||||||
|
yaml_SCANNER_ERROR // Cannot scan the input stream.
|
||||||
|
yaml_PARSER_ERROR // Cannot parse the input stream.
|
||||||
|
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
|
||||||
|
yaml_WRITER_ERROR // Cannot write to the output stream.
|
||||||
|
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
|
||||||
|
)
|
||||||
|
|
||||||
|
// The pointer position.
|
||||||
|
type yaml_mark_t struct {
|
||||||
|
index int // The position index.
|
||||||
|
line int // The position line.
|
||||||
|
column int // The position column.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Node Styles
|
||||||
|
|
||||||
|
type yaml_style_t int8
|
||||||
|
|
||||||
|
type yaml_scalar_style_t yaml_style_t
|
||||||
|
|
||||||
|
// Scalar styles.
|
||||||
|
const (
|
||||||
|
// Let the emitter choose the style.
|
||||||
|
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
||||||
|
|
||||||
|
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
|
||||||
|
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
|
||||||
|
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
|
||||||
|
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
|
||||||
|
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
|
||||||
|
)
|
||||||
|
|
||||||
|
type yaml_sequence_style_t yaml_style_t
|
||||||
|
|
||||||
|
// Sequence styles.
|
||||||
|
const (
|
||||||
|
// Let the emitter choose the style.
|
||||||
|
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
||||||
|
|
||||||
|
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
|
||||||
|
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
|
||||||
|
)
|
||||||
|
|
||||||
|
type yaml_mapping_style_t yaml_style_t
|
||||||
|
|
||||||
|
// Mapping styles.
|
||||||
|
const (
|
||||||
|
// Let the emitter choose the style.
|
||||||
|
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
||||||
|
|
||||||
|
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
|
||||||
|
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tokens
|
||||||
|
|
||||||
|
type yaml_token_type_t int
|
||||||
|
|
||||||
|
// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)
|
||||||
|
|
||||||
|
func (tt yaml_token_type_t) String() string {
|
||||||
|
switch tt {
|
||||||
|
case yaml_NO_TOKEN:
|
||||||
|
return "yaml_NO_TOKEN"
|
||||||
|
case yaml_STREAM_START_TOKEN:
|
||||||
|
return "yaml_STREAM_START_TOKEN"
|
||||||
|
case yaml_STREAM_END_TOKEN:
|
||||||
|
return "yaml_STREAM_END_TOKEN"
|
||||||
|
case yaml_VERSION_DIRECTIVE_TOKEN:
|
||||||
|
return "yaml_VERSION_DIRECTIVE_TOKEN"
|
||||||
|
case yaml_TAG_DIRECTIVE_TOKEN:
|
||||||
|
return "yaml_TAG_DIRECTIVE_TOKEN"
|
||||||
|
case yaml_DOCUMENT_START_TOKEN:
|
||||||
|
return "yaml_DOCUMENT_START_TOKEN"
|
||||||
|
case yaml_DOCUMENT_END_TOKEN:
|
||||||
|
return "yaml_DOCUMENT_END_TOKEN"
|
||||||
|
case yaml_BLOCK_SEQUENCE_START_TOKEN:
|
||||||
|
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
|
||||||
|
case yaml_BLOCK_MAPPING_START_TOKEN:
|
||||||
|
return "yaml_BLOCK_MAPPING_START_TOKEN"
|
||||||
|
case yaml_BLOCK_END_TOKEN:
|
||||||
|
return "yaml_BLOCK_END_TOKEN"
|
||||||
|
case yaml_FLOW_SEQUENCE_START_TOKEN:
|
||||||
|
return "yaml_FLOW_SEQUENCE_START_TOKEN"
|
||||||
|
case yaml_FLOW_SEQUENCE_END_TOKEN:
|
||||||
|
return "yaml_FLOW_SEQUENCE_END_TOKEN"
|
||||||
|
case yaml_FLOW_MAPPING_START_TOKEN:
|
||||||
|
return "yaml_FLOW_MAPPING_START_TOKEN"
|
||||||
|
case yaml_FLOW_MAPPING_END_TOKEN:
|
||||||
|
return "yaml_FLOW_MAPPING_END_TOKEN"
|
||||||
|
case yaml_BLOCK_ENTRY_TOKEN:
|
||||||
|
return "yaml_BLOCK_ENTRY_TOKEN"
|
||||||
|
case yaml_FLOW_ENTRY_TOKEN:
|
||||||
|
return "yaml_FLOW_ENTRY_TOKEN"
|
||||||
|
case yaml_KEY_TOKEN:
|
||||||
|
return "yaml_KEY_TOKEN"
|
||||||
|
case yaml_VALUE_TOKEN:
|
||||||
|
return "yaml_VALUE_TOKEN"
|
||||||
|
case yaml_ALIAS_TOKEN:
|
||||||
|
return "yaml_ALIAS_TOKEN"
|
||||||
|
case yaml_ANCHOR_TOKEN:
|
||||||
|
return "yaml_ANCHOR_TOKEN"
|
||||||
|
case yaml_TAG_TOKEN:
|
||||||
|
return "yaml_TAG_TOKEN"
|
||||||
|
case yaml_SCALAR_TOKEN:
|
||||||
|
return "yaml_SCALAR_TOKEN"
|
||||||
|
}
|
||||||
|
return "<unknown token>"
|
||||||
|
}
|
||||||
|
|
||||||
|
// The token structure.
|
||||||
|
type yaml_token_t struct {
|
||||||
|
// The token type.
|
||||||
|
typ yaml_token_type_t
|
||||||
|
|
||||||
|
// The start/end of the token.
|
||||||
|
start_mark, end_mark yaml_mark_t
|
||||||
|
|
||||||
|
// The stream encoding (for yaml_STREAM_START_TOKEN).
|
||||||
|
encoding yaml_encoding_t
|
||||||
|
|
||||||
|
// The alias/anchor/scalar value or tag/tag directive handle
|
||||||
|
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
|
||||||
|
value []byte
|
||||||
|
|
||||||
|
// The tag suffix (for yaml_TAG_TOKEN).
|
||||||
|
suffix []byte
|
||||||
|
|
||||||
|
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
|
||||||
|
prefix []byte
|
||||||
|
|
||||||
|
// The scalar style (for yaml_SCALAR_TOKEN).
|
||||||
|
style yaml_scalar_style_t
|
||||||
|
|
||||||
|
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
|
||||||
|
major, minor int8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Events
|
||||||
|
|
||||||
|
type yaml_event_type_t int8
|
||||||
|
|
||||||
|
// Event types.
|
||||||
|
const (
|
||||||
|
// An empty event.
|
||||||
|
yaml_NO_EVENT yaml_event_type_t = iota
|
||||||
|
|
||||||
|
yaml_STREAM_START_EVENT // A STREAM-START event.
|
||||||
|
yaml_STREAM_END_EVENT // A STREAM-END event.
|
||||||
|
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
|
||||||
|
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
|
||||||
|
yaml_ALIAS_EVENT // An ALIAS event.
|
||||||
|
yaml_SCALAR_EVENT // A SCALAR event.
|
||||||
|
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
|
||||||
|
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
|
||||||
|
yaml_MAPPING_START_EVENT // A MAPPING-START event.
|
||||||
|
yaml_MAPPING_END_EVENT // A MAPPING-END event.
|
||||||
|
)
|
||||||
|
|
||||||
|
// The event structure.
|
||||||
|
type yaml_event_t struct {
|
||||||
|
|
||||||
|
// The event type.
|
||||||
|
typ yaml_event_type_t
|
||||||
|
|
||||||
|
// The start and end of the event.
|
||||||
|
start_mark, end_mark yaml_mark_t
|
||||||
|
|
||||||
|
// The document encoding (for yaml_STREAM_START_EVENT).
|
||||||
|
encoding yaml_encoding_t
|
||||||
|
|
||||||
|
// The version directive (for yaml_DOCUMENT_START_EVENT).
|
||||||
|
version_directive *yaml_version_directive_t
|
||||||
|
|
||||||
|
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
|
||||||
|
tag_directives []yaml_tag_directive_t
|
||||||
|
|
||||||
|
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
|
||||||
|
anchor []byte
|
||||||
|
|
||||||
|
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
||||||
|
tag []byte
|
||||||
|
|
||||||
|
// The scalar value (for yaml_SCALAR_EVENT).
|
||||||
|
value []byte
|
||||||
|
|
||||||
|
// Is the document start/end indicator implicit, or the tag optional?
|
||||||
|
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
|
||||||
|
implicit bool
|
||||||
|
|
||||||
|
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
|
||||||
|
quoted_implicit bool
|
||||||
|
|
||||||
|
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
||||||
|
style yaml_style_t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
|
||||||
|
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
|
||||||
|
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
|
||||||
|
|
||||||
|
// Nodes
|
||||||
|
|
||||||
|
const (
|
||||||
|
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
|
||||||
|
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
|
||||||
|
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
|
||||||
|
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
|
||||||
|
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
|
||||||
|
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
|
||||||
|
|
||||||
|
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
|
||||||
|
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
|
||||||
|
|
||||||
|
// Not in original libyaml.
|
||||||
|
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
||||||
|
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
|
||||||
|
|
||||||
|
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
|
||||||
|
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
|
||||||
|
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
|
||||||
|
)
|
||||||
|
|
||||||
|
type yaml_node_type_t int
|
||||||
|
|
||||||
|
// Node types.
|
||||||
|
const (
|
||||||
|
// An empty node.
|
||||||
|
yaml_NO_NODE yaml_node_type_t = iota
|
||||||
|
|
||||||
|
yaml_SCALAR_NODE // A scalar node.
|
||||||
|
yaml_SEQUENCE_NODE // A sequence node.
|
||||||
|
yaml_MAPPING_NODE // A mapping node.
|
||||||
|
)
|
||||||
|
|
||||||
|
// An element of a sequence node.
|
||||||
|
type yaml_node_item_t int
|
||||||
|
|
||||||
|
// An element of a mapping node.
|
||||||
|
type yaml_node_pair_t struct {
|
||||||
|
key int // The key of the element.
|
||||||
|
value int // The value of the element.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The node structure.
|
||||||
|
type yaml_node_t struct {
|
||||||
|
typ yaml_node_type_t // The node type.
|
||||||
|
tag []byte // The node tag.
|
||||||
|
|
||||||
|
// The node data.
|
||||||
|
|
||||||
|
// The scalar parameters (for yaml_SCALAR_NODE).
|
||||||
|
scalar struct {
|
||||||
|
value []byte // The scalar value.
|
||||||
|
length int // The length of the scalar value.
|
||||||
|
style yaml_scalar_style_t // The scalar style.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The sequence parameters (for YAML_SEQUENCE_NODE).
|
||||||
|
sequence struct {
|
||||||
|
items_data []yaml_node_item_t // The stack of sequence items.
|
||||||
|
style yaml_sequence_style_t // The sequence style.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The mapping parameters (for yaml_MAPPING_NODE).
|
||||||
|
mapping struct {
|
||||||
|
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
|
||||||
|
pairs_start *yaml_node_pair_t // The beginning of the stack.
|
||||||
|
pairs_end *yaml_node_pair_t // The end of the stack.
|
||||||
|
pairs_top *yaml_node_pair_t // The top of the stack.
|
||||||
|
style yaml_mapping_style_t // The mapping style.
|
||||||
|
}
|
||||||
|
|
||||||
|
start_mark yaml_mark_t // The beginning of the node.
|
||||||
|
end_mark yaml_mark_t // The end of the node.
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// The document structure.
|
||||||
|
type yaml_document_t struct {
|
||||||
|
|
||||||
|
// The document nodes.
|
||||||
|
nodes []yaml_node_t
|
||||||
|
|
||||||
|
// The version directive.
|
||||||
|
version_directive *yaml_version_directive_t
|
||||||
|
|
||||||
|
// The list of tag directives.
|
||||||
|
tag_directives_data []yaml_tag_directive_t
|
||||||
|
tag_directives_start int // The beginning of the tag directives list.
|
||||||
|
tag_directives_end int // The end of the tag directives list.
|
||||||
|
|
||||||
|
start_implicit int // Is the document start indicator implicit?
|
||||||
|
end_implicit int // Is the document end indicator implicit?
|
||||||
|
|
||||||
|
// The start/end of the document.
|
||||||
|
start_mark, end_mark yaml_mark_t
|
||||||
|
}
|
||||||
|
|
||||||
|
// The prototype of a read handler.
|
||||||
|
//
|
||||||
|
// The read handler is called when the parser needs to read more bytes from the
|
||||||
|
// source. The handler should write not more than size bytes to the buffer.
|
||||||
|
// The number of written bytes should be set to the size_read variable.
|
||||||
|
//
|
||||||
|
// [in,out] data A pointer to an application data specified by
|
||||||
|
// yaml_parser_set_input().
|
||||||
|
// [out] buffer The buffer to write the data from the source.
|
||||||
|
// [in] size The size of the buffer.
|
||||||
|
// [out] size_read The actual number of bytes read from the source.
|
||||||
|
//
|
||||||
|
// On success, the handler should return 1. If the handler failed,
|
||||||
|
// the returned value should be 0. On EOF, the handler should set the
|
||||||
|
// size_read to 0 and return 1.
|
||||||
|
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
||||||
|
|
||||||
|
// This structure holds information about a potential simple key.
|
||||||
|
type yaml_simple_key_t struct {
|
||||||
|
possible bool // Is a simple key possible?
|
||||||
|
required bool // Is a simple key required?
|
||||||
|
token_number int // The number of the token.
|
||||||
|
mark yaml_mark_t // The position mark.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota // Expect STREAM-START.

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)
|
||||||
|
|
||||||
|
func (ps yaml_parser_state_t) String() string {
|
||||||
|
switch ps {
|
||||||
|
case yaml_PARSE_STREAM_START_STATE:
|
||||||
|
return "yaml_PARSE_STREAM_START_STATE"
|
||||||
|
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
|
||||||
|
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
|
||||||
|
case yaml_PARSE_DOCUMENT_START_STATE:
|
||||||
|
return "yaml_PARSE_DOCUMENT_START_STATE"
|
||||||
|
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
|
||||||
|
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
|
||||||
|
case yaml_PARSE_DOCUMENT_END_STATE:
|
||||||
|
return "yaml_PARSE_DOCUMENT_END_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_NODE_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_NODE_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
|
||||||
|
case yaml_PARSE_FLOW_NODE_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_NODE_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
|
||||||
|
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
|
||||||
|
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
|
||||||
|
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
|
||||||
|
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
|
||||||
|
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
|
||||||
|
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
|
||||||
|
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
|
||||||
|
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
|
||||||
|
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
|
||||||
|
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
|
||||||
|
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
|
||||||
|
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
|
||||||
|
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
|
||||||
|
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
|
||||||
|
case yaml_PARSE_END_STATE:
|
||||||
|
return "yaml_PARSE_END_STATE"
|
||||||
|
}
|
||||||
|
return "<unknown parser state>"
|
||||||
|
}
|
||||||
|
|
||||||
|
// This structure holds aliases data.
|
||||||
|
type yaml_alias_data_t struct {
|
||||||
|
anchor []byte // The anchor.
|
||||||
|
index int // The node id.
|
||||||
|
mark yaml_mark_t // The anchor mark.
|
||||||
|
}
|
||||||
|
|
||||||
|
// The parser structure.
|
||||||
|
//
|
||||||
|
// All members are internal. Manage the structure using the
|
||||||
|
// yaml_parser_ family of functions.
|
||||||
|
type yaml_parser_t struct {
|
||||||
|
|
||||||
|
// Error handling
|
||||||
|
|
||||||
|
error yaml_error_type_t // Error type.
|
||||||
|
|
||||||
|
problem string // Error description.
|
||||||
|
|
||||||
|
// The byte about which the problem occurred.
|
||||||
|
problem_offset int
|
||||||
|
problem_value int
|
||||||
|
problem_mark yaml_mark_t
|
||||||
|
|
||||||
|
// The error context.
|
||||||
|
context string
|
||||||
|
context_mark yaml_mark_t
|
||||||
|
|
||||||
|
// Reader stuff
|
||||||
|
|
||||||
|
read_handler yaml_read_handler_t // Read handler.
|
||||||
|
|
||||||
|
input_file io.Reader // File input data.
|
||||||
|
input []byte // String input data.
|
||||||
|
input_pos int
|
||||||
|
|
||||||
|
eof bool // EOF flag
|
||||||
|
|
||||||
|
buffer []byte // The working buffer.
|
||||||
|
buffer_pos int // The current position of the buffer.
|
||||||
|
|
||||||
|
unread int // The number of unread characters in the buffer.
|
||||||
|
|
||||||
|
raw_buffer []byte // The raw buffer.
|
||||||
|
raw_buffer_pos int // The current position of the buffer.
|
||||||
|
|
||||||
|
encoding yaml_encoding_t // The input encoding.
|
||||||
|
|
||||||
|
offset int // The offset of the current position (in bytes).
|
||||||
|
mark yaml_mark_t // The mark of the current position.
|
||||||
|
|
||||||
|
// Scanner stuff
|
||||||
|
|
||||||
|
stream_start_produced bool // Have we started to scan the input stream?
|
||||||
|
stream_end_produced bool // Have we reached the end of the input stream?
|
||||||
|
|
||||||
|
flow_level int // The number of unclosed '[' and '{' indicators.
|
||||||
|
|
||||||
|
tokens []yaml_token_t // The tokens queue.
|
||||||
|
tokens_head int // The head of the tokens queue.
|
||||||
|
tokens_parsed int // The number of tokens fetched from the queue.
|
||||||
|
token_available bool // Does the tokens queue contain a token ready for dequeueing.
|
||||||
|
|
||||||
|
indent int // The current indentation level.
|
||||||
|
indents []int // The indentation levels stack.
|
||||||
|
|
||||||
|
simple_key_allowed bool // May a simple key occur at the current position?
|
||||||
|
simple_keys []yaml_simple_key_t // The stack of simple keys.
|
||||||
|
|
||||||
|
// Parser stuff
|
||||||
|
|
||||||
|
state yaml_parser_state_t // The current parser state.
|
||||||
|
states []yaml_parser_state_t // The parser states stack.
|
||||||
|
marks []yaml_mark_t // The stack of marks.
|
||||||
|
tag_directives []yaml_tag_directive_t // The list of TAG directives.
|
||||||
|
|
||||||
|
// Dumper stuff
|
||||||
|
|
||||||
|
aliases []yaml_alias_data_t // The alias data.
|
||||||
|
|
||||||
|
document *yaml_document_t // The currently parsed document.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emitter Definitions
|
||||||
|
|
||||||
|
// The prototype of a write handler.
|
||||||
|
//
|
||||||
|
// The write handler is called when the emitter needs to flush the accumulated
|
||||||
|
// characters to the output. The handler should write @a size bytes of the
|
||||||
|
// @a buffer to the output.
|
||||||
|
//
|
||||||
|
// @param[in,out] data A pointer to an application data specified by
|
||||||
|
// yaml_emitter_set_output().
|
||||||
|
// @param[in] buffer The buffer with bytes to be written.
|
||||||
|
// @param[in] size The size of the buffer.
|
||||||
|
//
|
||||||
|
// @returns On success, the handler should return @c 1. If the handler failed,
|
||||||
|
// the returned value should be @c 0.
|
||||||
|
//
|
||||||
|
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
||||||
|
|
||||||
|
type yaml_emitter_state_t int
|
||||||
|
|
||||||
|
// The emitter states.
|
||||||
|
const (
|
||||||
|
// Expect STREAM-START.
|
||||||
|
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
||||||
|
|
||||||
|
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
|
||||||
|
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
|
||||||
|
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
||||||
|
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
||||||
|
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
|
||||||
|
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
|
||||||
|
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
||||||
|
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
||||||
|
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
|
||||||
|
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
||||||
|
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
|
||||||
|
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
|
||||||
|
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
||||||
|
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
|
||||||
|
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
|
||||||
|
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
|
||||||
|
yaml_EMIT_END_STATE // Expect nothing.
|
||||||
|
)
|
||||||
|
|
||||||
|
// The emitter structure.
|
||||||
|
//
|
||||||
|
// All members are internal. Manage the structure using the @c yaml_emitter_
|
||||||
|
// family of functions.
|
||||||
|
type yaml_emitter_t struct {
|
||||||
|
|
||||||
|
// Error handling
|
||||||
|
|
||||||
|
error yaml_error_type_t // Error type.
|
||||||
|
problem string // Error description.
|
||||||
|
|
||||||
|
// Writer stuff
|
||||||
|
|
||||||
|
write_handler yaml_write_handler_t // Write handler.
|
||||||
|
|
||||||
|
output_buffer *[]byte // String output data.
|
||||||
|
output_file io.Writer // File output data.
|
||||||
|
|
||||||
|
buffer []byte // The working buffer.
|
||||||
|
buffer_pos int // The current position of the buffer.
|
||||||
|
|
||||||
|
raw_buffer []byte // The raw buffer.
|
||||||
|
raw_buffer_pos int // The current position of the buffer.
|
||||||
|
|
||||||
|
encoding yaml_encoding_t // The stream encoding.
|
||||||
|
|
||||||
|
// Emitter stuff
|
||||||
|
|
||||||
|
canonical bool // If the output is in the canonical style?
|
||||||
|
best_indent int // The number of indentation spaces.
|
||||||
|
best_width int // The preferred width of the output lines.
|
||||||
|
unicode bool // Allow unescaped non-ASCII characters?
|
||||||
|
line_break yaml_break_t // The preferred line break.
|
||||||
|
|
||||||
|
state yaml_emitter_state_t // The current emitter state.
|
||||||
|
states []yaml_emitter_state_t // The stack of states.
|
||||||
|
|
||||||
|
events []yaml_event_t // The event queue.
|
||||||
|
events_head int // The head of the event queue.
|
||||||
|
|
||||||
|
indents []int // The stack of indentation levels.
|
||||||
|
|
||||||
|
tag_directives []yaml_tag_directive_t // The list of tag directives.
|
||||||
|
|
||||||
|
indent int // The current indentation level.
|
||||||
|
|
||||||
|
flow_level int // The current flow level.
|
||||||
|
|
||||||
|
root_context bool // Is it the document root context?
|
||||||
|
sequence_context bool // Is it a sequence context?
|
||||||
|
mapping_context bool // Is it a mapping context?
|
||||||
|
simple_key_context bool // Is it a simple mapping key context?
|
||||||
|
|
||||||
|
line int // The current line.
|
||||||
|
column int // The current column.
|
||||||
|
whitespace bool // If the last character was a whitespace?
|
||||||
|
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
|
||||||
|
open_ended bool // If an explicit document end is required?
|
||||||
|
|
||||||
|
// Anchor analysis.
|
||||||
|
anchor_data struct {
|
||||||
|
anchor []byte // The anchor value.
|
||||||
|
alias bool // Is it an alias?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag analysis.
|
||||||
|
tag_data struct {
|
||||||
|
handle []byte // The tag handle.
|
||||||
|
suffix []byte // The tag suffix.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scalar analysis.
|
||||||
|
scalar_data struct {
|
||||||
|
value []byte // The scalar value.
|
||||||
|
multiline bool // Does the scalar contain line breaks?
|
||||||
|
flow_plain_allowed bool // Can the scalar be expessed in the flow plain style?
|
||||||
|
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
|
||||||
|
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
|
||||||
|
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
|
||||||
|
style yaml_scalar_style_t // The output style.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dumper stuff
|
||||||
|
|
||||||
|
opened bool // If the stream was already opened?
|
||||||
|
closed bool // If the stream was already closed?
|
||||||
|
|
||||||
|
// The information associated with the document nodes.
|
||||||
|
anchors *struct {
|
||||||
|
references int // The number of references.
|
||||||
|
anchor int // The anchor id.
|
||||||
|
serialized bool // If the node has been emitted?
|
||||||
|
}
|
||||||
|
|
||||||
|
last_anchor_id int // The last assigned anchor id.
|
||||||
|
|
||||||
|
document *yaml_document_t // The currently emitted document.
|
||||||
|
}
|
173
Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
generated
vendored
Normal file
173
Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
generated
vendored
Normal file
|
@ -0,0 +1,173 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
// Buffer, stack, and queue sizing constants for the parser and emitter.
const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer (each input byte
	// may expand to up to three bytes when transcoding UTF-16 to UTF-8).
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The initial capacity of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)
|
||||||
|
|
||||||
|
// Check if the character at the specified position is an alphabetical
|
||||||
|
// character, a digit, '_', or '-'.
|
||||||
|
func is_alpha(b []byte, i int) bool {
|
||||||
|
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is a digit.
|
||||||
|
func is_digit(b []byte, i int) bool {
|
||||||
|
return b[i] >= '0' && b[i] <= '9'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the value of a digit.
|
||||||
|
func as_digit(b []byte, i int) int {
|
||||||
|
return int(b[i]) - '0'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is a hex-digit.
|
||||||
|
func is_hex(b []byte, i int) bool {
|
||||||
|
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the value of a hex-digit.
|
||||||
|
func as_hex(b []byte, i int) int {
|
||||||
|
bi := b[i]
|
||||||
|
if bi >= 'A' && bi <= 'F' {
|
||||||
|
return int(bi) - 'A' + 10
|
||||||
|
}
|
||||||
|
if bi >= 'a' && bi <= 'f' {
|
||||||
|
return int(bi) - 'a' + 10
|
||||||
|
}
|
||||||
|
return int(bi) - '0'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character is ASCII.
|
||||||
|
func is_ascii(b []byte, i int) bool {
|
||||||
|
return b[i] <= 0x7F
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the start of the buffer can be printed unescaped.
|
||||||
|
func is_printable(b []byte, i int) bool {
|
||||||
|
return ((b[i] == 0x0A) || // . == #x0A
|
||||||
|
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
|
||||||
|
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
|
||||||
|
(b[i] > 0xC2 && b[i] < 0xED) ||
|
||||||
|
(b[i] == 0xED && b[i+1] < 0xA0) ||
|
||||||
|
(b[i] == 0xEE) ||
|
||||||
|
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
|
||||||
|
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
|
||||||
|
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is NUL.
|
||||||
|
func is_z(b []byte, i int) bool {
|
||||||
|
return b[i] == 0x00
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the beginning of the buffer is a BOM.
|
||||||
|
func is_bom(b []byte, i int) bool {
|
||||||
|
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is space.
|
||||||
|
func is_space(b []byte, i int) bool {
|
||||||
|
return b[i] == ' '
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is tab.
|
||||||
|
func is_tab(b []byte, i int) bool {
|
||||||
|
return b[i] == '\t'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is blank (space or tab).
|
||||||
|
func is_blank(b []byte, i int) bool {
|
||||||
|
//return is_space(b, i) || is_tab(b, i)
|
||||||
|
return b[i] == ' ' || b[i] == '\t'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character at the specified position is a line break.
|
||||||
|
func is_break(b []byte, i int) bool {
|
||||||
|
return (b[i] == '\r' || // CR (#xD)
|
||||||
|
b[i] == '\n' || // LF (#xA)
|
||||||
|
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
|
||||||
|
}
|
||||||
|
|
||||||
|
func is_crlf(b []byte, i int) bool {
|
||||||
|
return b[i] == '\r' && b[i+1] == '\n'
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character is a line break or NUL.
|
||||||
|
func is_breakz(b []byte, i int) bool {
|
||||||
|
//return is_break(b, i) || is_z(b, i)
|
||||||
|
return ( // is_break:
|
||||||
|
b[i] == '\r' || // CR (#xD)
|
||||||
|
b[i] == '\n' || // LF (#xA)
|
||||||
|
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||||
|
// is_z:
|
||||||
|
b[i] == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character is a line break, space, or NUL.
|
||||||
|
func is_spacez(b []byte, i int) bool {
|
||||||
|
//return is_space(b, i) || is_breakz(b, i)
|
||||||
|
return ( // is_space:
|
||||||
|
b[i] == ' ' ||
|
||||||
|
// is_breakz:
|
||||||
|
b[i] == '\r' || // CR (#xD)
|
||||||
|
b[i] == '\n' || // LF (#xA)
|
||||||
|
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||||
|
b[i] == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the character is a line break, space, tab, or NUL.
|
||||||
|
func is_blankz(b []byte, i int) bool {
|
||||||
|
//return is_blank(b, i) || is_breakz(b, i)
|
||||||
|
return ( // is_blank:
|
||||||
|
b[i] == ' ' || b[i] == '\t' ||
|
||||||
|
// is_breakz:
|
||||||
|
b[i] == '\r' || // CR (#xD)
|
||||||
|
b[i] == '\n' || // LF (#xA)
|
||||||
|
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||||
|
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||||
|
b[i] == 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the width of the character.
|
||||||
|
func width(b byte) int {
|
||||||
|
// Don't replace these by a switch without first
|
||||||
|
// confirming that it is being inlined.
|
||||||
|
if b&0x80 == 0x00 {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if b&0xE0 == 0xC0 {
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
if b&0xF0 == 0xE0 {
|
||||||
|
return 3
|
||||||
|
}
|
||||||
|
if b&0xF8 == 0xF0 {
|
||||||
|
return 4
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
|
||||||
|
}
|
11
Makefile
11
Makefile
|
@ -26,7 +26,7 @@ advice: $(GOCC)
|
||||||
|
|
||||||
binary: build
|
binary: build
|
||||||
|
|
||||||
build: config tools web $(GOPATH)
|
build: tools web $(GOPATH)
|
||||||
$(GO) build -o prometheus $(BUILDFLAGS) .
|
$(GO) build -o prometheus $(BUILDFLAGS) .
|
||||||
|
|
||||||
docker: build
|
docker: build
|
||||||
|
@ -49,7 +49,7 @@ tag:
|
||||||
$(BUILD_PATH)/cache/$(GOPKG):
|
$(BUILD_PATH)/cache/$(GOPKG):
|
||||||
$(CURL) -o $@ -L $(GOURL)/$(GOPKG)
|
$(CURL) -o $@ -L $(GOURL)/$(GOPKG)
|
||||||
|
|
||||||
benchmark: config dependencies tools web
|
benchmark: dependencies tools web
|
||||||
$(GO) test $(GO_TEST_FLAGS) -test.run='NONE' -test.bench='.*' -test.benchmem ./... | tee benchmark.txt
|
$(GO) test $(GO_TEST_FLAGS) -test.run='NONE' -test.bench='.*' -test.benchmem ./... | tee benchmark.txt
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
|
@ -62,9 +62,6 @@ clean:
|
||||||
-find . -type f -name '*#' -exec rm '{}' ';'
|
-find . -type f -name '*#' -exec rm '{}' ';'
|
||||||
-find . -type f -name '.#*' -exec rm '{}' ';'
|
-find . -type f -name '.#*' -exec rm '{}' ';'
|
||||||
|
|
||||||
config:
|
|
||||||
$(MAKE) -C config
|
|
||||||
|
|
||||||
$(SELFLINK): $(GOPATH)
|
$(SELFLINK): $(GOPATH)
|
||||||
ln -s $(MAKEFILE_DIR) $@
|
ln -s $(MAKEFILE_DIR) $@
|
||||||
|
|
||||||
|
@ -91,7 +88,7 @@ run: binary
|
||||||
search_index:
|
search_index:
|
||||||
godoc -index -write_index -index_files='search_index'
|
godoc -index -write_index -index_files='search_index'
|
||||||
|
|
||||||
test: config dependencies tools web
|
test: dependencies tools web
|
||||||
$(GO) test $(GO_TEST_FLAGS) ./...
|
$(GO) test $(GO_TEST_FLAGS) ./...
|
||||||
|
|
||||||
tools: dependencies
|
tools: dependencies
|
||||||
|
@ -100,4 +97,4 @@ tools: dependencies
|
||||||
web: dependencies
|
web: dependencies
|
||||||
$(MAKE) -C web
|
$(MAKE) -C web
|
||||||
|
|
||||||
.PHONY: advice binary build clean config dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools
|
.PHONY: advice binary build clean dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools
|
||||||
|
|
|
@ -1,22 +0,0 @@
|
||||||
# Copyright 2013 The Prometheus Authors
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
all: generated/config.pb.go
|
|
||||||
|
|
||||||
SUFFIXES:
|
|
||||||
|
|
||||||
include ../Makefile.INCLUDE
|
|
||||||
|
|
||||||
generated/config.pb.go: config.proto
|
|
||||||
go get github.com/golang/protobuf/protoc-gen-go
|
|
||||||
$(PROTOC) --proto_path=$(PREFIX)/include:. --go_out=generated/ config.proto
|
|
598
config/config.go
598
config/config.go
|
@ -1,167 +1,487 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
clientmodel "github.com/prometheus/client_golang/model"
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/utility"
|
"github.com/prometheus/prometheus/utility"
|
||||||
|
|
||||||
pb "github.com/prometheus/prometheus/config/generated"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var jobNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_-]*$")
|
var (
|
||||||
var labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
patJobName = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_-]*$`)
|
||||||
|
patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
|
||||||
|
)
|
||||||
|
|
||||||
// Config encapsulates the configuration of a Prometheus instance. It wraps the
|
// Load parses the YAML input s into a Config.
|
||||||
// raw configuration protocol buffer to be able to add custom methods to it.
|
func Load(s string) (*Config, error) {
|
||||||
type Config struct {
|
cfg := &Config{
|
||||||
// The protobuf containing the actual configuration values.
|
original: s,
|
||||||
pb.PrometheusConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns an ASCII serialization of the loaded configuration protobuf.
|
|
||||||
func (c Config) String() string {
|
|
||||||
return proto.MarshalTextString(&c.PrometheusConfig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateLabels validates whether label names have the correct format.
|
|
||||||
func (c Config) validateLabels(labels *pb.LabelPairs) error {
|
|
||||||
if labels == nil {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
for _, label := range labels.Label {
|
err := yaml.Unmarshal([]byte(s), cfg)
|
||||||
if !labelNameRE.MatchString(label.GetName()) {
|
|
||||||
return fmt.Errorf("invalid label name '%s'", label.GetName())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate checks an entire parsed Config for the validity of its fields.
|
|
||||||
func (c Config) Validate() error {
|
|
||||||
// Check the global configuration section for validity.
|
|
||||||
global := c.Global
|
|
||||||
if _, err := utility.StringToDuration(global.GetScrapeInterval()); err != nil {
|
|
||||||
return fmt.Errorf("invalid global scrape interval: %s", err)
|
|
||||||
}
|
|
||||||
if _, err := utility.StringToDuration(global.GetEvaluationInterval()); err != nil {
|
|
||||||
return fmt.Errorf("invalid rule evaluation interval: %s", err)
|
|
||||||
}
|
|
||||||
if err := c.validateLabels(global.Labels); err != nil {
|
|
||||||
return fmt.Errorf("invalid global labels: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check each job configuration for validity.
|
|
||||||
jobNames := map[string]bool{}
|
|
||||||
for _, job := range c.Job {
|
|
||||||
if jobNames[job.GetName()] {
|
|
||||||
return fmt.Errorf("found multiple jobs configured with the same name: '%s'", job.GetName())
|
|
||||||
}
|
|
||||||
jobNames[job.GetName()] = true
|
|
||||||
|
|
||||||
if !jobNameRE.MatchString(job.GetName()) {
|
|
||||||
return fmt.Errorf("invalid job name '%s'", job.GetName())
|
|
||||||
}
|
|
||||||
if _, err := utility.StringToDuration(job.GetScrapeInterval()); err != nil {
|
|
||||||
return fmt.Errorf("invalid scrape interval for job '%s': %s", job.GetName(), err)
|
|
||||||
}
|
|
||||||
if _, err := utility.StringToDuration(job.GetSdRefreshInterval()); err != nil {
|
|
||||||
return fmt.Errorf("invalid SD refresh interval for job '%s': %s", job.GetName(), err)
|
|
||||||
}
|
|
||||||
if _, err := utility.StringToDuration(job.GetScrapeTimeout()); err != nil {
|
|
||||||
return fmt.Errorf("invalid scrape timeout for job '%s': %s", job.GetName(), err)
|
|
||||||
}
|
|
||||||
for _, targetGroup := range job.TargetGroup {
|
|
||||||
if err := c.validateLabels(targetGroup.Labels); err != nil {
|
|
||||||
return fmt.Errorf("invalid labels for job '%s': %s", job.GetName(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if job.SdName != nil && len(job.TargetGroup) > 0 {
|
|
||||||
return fmt.Errorf("specified both DNS-SD name and target group for job: %s", job.GetName())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetJobByName finds a job by its name in a Config object.
|
|
||||||
func (c Config) GetJobByName(name string) *JobConfig {
|
|
||||||
for _, job := range c.Job {
|
|
||||||
if job.GetName() == name {
|
|
||||||
return &JobConfig{*job}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GlobalLabels returns the global labels as a LabelSet.
|
|
||||||
func (c Config) GlobalLabels() clientmodel.LabelSet {
|
|
||||||
labels := clientmodel.LabelSet{}
|
|
||||||
if c.Global.Labels != nil {
|
|
||||||
for _, label := range c.Global.Labels.Label {
|
|
||||||
labels[clientmodel.LabelName(label.GetName())] = clientmodel.LabelValue(label.GetValue())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return labels
|
|
||||||
}
|
|
||||||
|
|
||||||
// Jobs returns all the jobs in a Config object.
|
|
||||||
func (c Config) Jobs() (jobs []JobConfig) {
|
|
||||||
for _, job := range c.Job {
|
|
||||||
jobs = append(jobs, JobConfig{*job})
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringToDuration converts a string to a duration and dies on invalid format.
|
|
||||||
func stringToDuration(intervalStr string) time.Duration {
|
|
||||||
duration, err := utility.StringToDuration(intervalStr)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
return nil, err
|
||||||
}
|
}
|
||||||
return duration
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ScrapeInterval gets the default scrape interval for a Config.
|
// LoadFromFile parses the given YAML file into a Config.
|
||||||
func (c Config) ScrapeInterval() time.Duration {
|
func LoadFromFile(filename string) (*Config, error) {
|
||||||
return stringToDuration(c.Global.GetScrapeInterval())
|
content, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Load(string(content))
|
||||||
}
|
}
|
||||||
|
|
||||||
// EvaluationInterval gets the default evaluation interval for a Config.
|
// The defaults applied before parsing the respective config sections.
|
||||||
func (c Config) EvaluationInterval() time.Duration {
|
var (
|
||||||
return stringToDuration(c.Global.GetEvaluationInterval())
|
// The default top-level configuration.
|
||||||
|
DefaultConfig = DefaultedConfig{
|
||||||
|
GlobalConfig: &GlobalConfig{DefaultGlobalConfig},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default global configuration.
|
||||||
|
DefaultGlobalConfig = DefaultedGlobalConfig{
|
||||||
|
ScrapeInterval: Duration(10 * time.Second),
|
||||||
|
ScrapeTimeout: Duration(10 * time.Second),
|
||||||
|
EvaluationInterval: Duration(1 * time.Minute),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Te default scrape configuration.
|
||||||
|
DefaultScrapeConfig = DefaultedScrapeConfig{
|
||||||
|
// ScrapeTimeout and ScrapeInterval default to the
|
||||||
|
// configured globals.
|
||||||
|
MetricsPath: "/metrics",
|
||||||
|
Scheme: "http",
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default Relabel configuration.
|
||||||
|
DefaultRelabelConfig = DefaultedRelabelConfig{
|
||||||
|
Action: RelabelReplace,
|
||||||
|
Separator: ";",
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default DNS SD configuration.
|
||||||
|
DefaultDNSSDConfig = DefaultedDNSSDConfig{
|
||||||
|
RefreshInterval: Duration(30 * time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default file SD configuration.
|
||||||
|
DefaultFileSDConfig = DefaultedFileSDConfig{
|
||||||
|
RefreshInterval: Duration(30 * time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
|
// The default Consul SD configuration.
|
||||||
|
DefaultConsulSDConfig = DefaultedConsulSDConfig{
|
||||||
|
TagSeparator: ",",
|
||||||
|
Scheme: "http",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config is the top-level configuration for Prometheus's config files.
|
||||||
|
type Config struct {
|
||||||
|
// DefaultedConfig contains the actual fields of Config.
|
||||||
|
DefaultedConfig `yaml:",inline"`
|
||||||
|
|
||||||
|
// original is the input from which the config was parsed.
|
||||||
|
original string
|
||||||
}
|
}
|
||||||
|
|
||||||
// JobConfig encapsulates the configuration of a single job. It wraps the raw
|
func (c Config) String() string {
|
||||||
// job protocol buffer to be able to add custom methods to it.
|
if c.original != "" {
|
||||||
type JobConfig struct {
|
return c.original
|
||||||
pb.JobConfig
|
}
|
||||||
|
b, err := yaml.Marshal(c)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("<error creating config string: %s>", err)
|
||||||
|
}
|
||||||
|
return string(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ScrapeInterval gets the scrape interval for a job.
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
func (c JobConfig) ScrapeInterval() time.Duration {
|
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
return stringToDuration(c.GetScrapeInterval())
|
c.DefaultedConfig = DefaultConfig
|
||||||
|
if err := unmarshal(&c.DefaultedConfig); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Do global overrides and validate unique names.
|
||||||
|
jobNames := map[string]struct{}{}
|
||||||
|
for _, scfg := range c.ScrapeConfigs {
|
||||||
|
if scfg.ScrapeInterval == 0 {
|
||||||
|
scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval
|
||||||
|
}
|
||||||
|
if scfg.ScrapeTimeout == 0 {
|
||||||
|
scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := jobNames[scfg.JobName]; ok {
|
||||||
|
return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
|
||||||
|
}
|
||||||
|
jobNames[scfg.JobName] = struct{}{}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ScrapeTimeout gets the scrape timeout for a job.
|
// DefaultedConfig is a proxy type for Config.
|
||||||
func (c JobConfig) ScrapeTimeout() time.Duration {
|
type DefaultedConfig struct {
|
||||||
return stringToDuration(c.GetScrapeTimeout())
|
GlobalConfig *GlobalConfig `yaml:"global"`
|
||||||
|
RuleFiles []string `yaml:"rule_files,omitempty"`
|
||||||
|
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GlobalConfig configures values that are used across other configuration
|
||||||
|
// objects.
|
||||||
|
type GlobalConfig struct {
|
||||||
|
// DefaultedGlobalConfig contains the actual fields for GlobalConfig.
|
||||||
|
DefaultedGlobalConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedGlobalConfig = DefaultGlobalConfig
|
||||||
|
if err := unmarshal(&c.DefaultedGlobalConfig); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedGlobalConfig is a proxy type for GlobalConfig.
|
||||||
|
type DefaultedGlobalConfig struct {
|
||||||
|
// How frequently to scrape targets by default.
|
||||||
|
ScrapeInterval Duration `yaml:"scrape_interval,omitempty"`
|
||||||
|
// The default timeout when scraping targets.
|
||||||
|
ScrapeTimeout Duration `yaml:"scrape_timeout,omitempty"`
|
||||||
|
// How frequently to evaluate rules by default.
|
||||||
|
EvaluationInterval Duration `yaml:"evaluation_interval,omitempty"`
|
||||||
|
|
||||||
|
// The labels to add to any timeseries that this Prometheus instance scrapes.
|
||||||
|
Labels clientmodel.LabelSet `yaml:"labels,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrapeConfig configures a scraping unit for Prometheus.
|
||||||
|
type ScrapeConfig struct {
|
||||||
|
// DefaultedScrapeConfig contains the actual fields for ScrapeConfig.
|
||||||
|
DefaultedScrapeConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedScrapeConfig = DefaultScrapeConfig
|
||||||
|
err := unmarshal(&c.DefaultedScrapeConfig)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !patJobName.MatchString(c.JobName) {
|
||||||
|
return fmt.Errorf("%q is not a valid job name", c.JobName)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedScrapeConfig is a proxy type for ScrapeConfig.
|
||||||
|
type DefaultedScrapeConfig struct {
|
||||||
|
// The job name to which the job label is set by default.
|
||||||
|
JobName string `yaml:"job_name"`
|
||||||
|
// How frequently to scrape the targets of this scrape config.
|
||||||
|
ScrapeInterval Duration `yaml:"scrape_interval,omitempty"`
|
||||||
|
// The timeout for scraping targets of this config.
|
||||||
|
ScrapeTimeout Duration `yaml:"scrape_timeout,omitempty"`
|
||||||
|
// The HTTP resource path on which to fetch metrics from targets.
|
||||||
|
MetricsPath string `yaml:"metrics_path,omitempty"`
|
||||||
|
// The URL scheme with which to fetch metrics from targets.
|
||||||
|
Scheme string `yaml:"scheme,omitempty"`
|
||||||
|
// The HTTP basic authentication credentials for the targets.
|
||||||
|
BasicAuth *BasicAuth `yaml:"basic_auth"`
|
||||||
|
|
||||||
|
// List of labeled target groups for this job.
|
||||||
|
TargetGroups []*TargetGroup `yaml:"target_groups,omitempty"`
|
||||||
|
// List of DNS service discovery configurations.
|
||||||
|
DNSSDConfigs []*DNSSDConfig `yaml:"dns_sd_configs,omitempty"`
|
||||||
|
// List of file service discovery configurations.
|
||||||
|
FileSDConfigs []*FileSDConfig `yaml:"file_sd_configs,omitempty"`
|
||||||
|
// List of Consul service discovery configurations.
|
||||||
|
ConsulSDConfigs []*ConsulSDConfig `yaml:"consul_sd_configs,omitempty"`
|
||||||
|
// List of relabel configurations.
|
||||||
|
RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// BasicAuth contains basic HTTP authentication credentials.
|
||||||
|
type BasicAuth struct {
|
||||||
|
Username string `yaml:"username"`
|
||||||
|
Password string `yaml:"password"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TargetGroup is a set of targets with a common label set.
|
||||||
|
type TargetGroup struct {
|
||||||
|
// Targets is a list of targets identified by a label set. Each target is
|
||||||
|
// uniquely identifiable in the group by its address label.
|
||||||
|
Targets []clientmodel.LabelSet `yaml:"targets,omitempty" json:"targets,omitempty"`
|
||||||
|
// Labels is a set of labels that is common across all targets in the group.
|
||||||
|
Labels clientmodel.LabelSet `yaml:"labels,omitempty" json:"labels,omitempty"`
|
||||||
|
|
||||||
|
// Source is an identifier that describes a group of targets.
|
||||||
|
Source string `yaml:"-", json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tg TargetGroup) String() string {
|
||||||
|
return tg.Source
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (tg *TargetGroup) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
g := struct {
|
||||||
|
Targets []string `yaml:"targets"`
|
||||||
|
Labels clientmodel.LabelSet `yaml:"labels"`
|
||||||
|
}{}
|
||||||
|
if err := unmarshal(&g); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tg.Targets = make([]clientmodel.LabelSet, 0, len(g.Targets))
|
||||||
|
for _, t := range g.Targets {
|
||||||
|
if strings.Contains(t, "/") {
|
||||||
|
return fmt.Errorf("%q is not a valid hostname", t)
|
||||||
|
}
|
||||||
|
tg.Targets = append(tg.Targets, clientmodel.LabelSet{
|
||||||
|
clientmodel.AddressLabel: clientmodel.LabelValue(t),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
tg.Labels = g.Labels
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalYAML implements the yaml.Marshaller interface.
|
||||||
|
func (tg TargetGroup) MarshalYAML() (interface{}, error) {
|
||||||
|
g := &struct {
|
||||||
|
Targets []string `yaml:"targets"`
|
||||||
|
Labels clientmodel.LabelSet `yaml:"labels,omitempty"`
|
||||||
|
}{
|
||||||
|
Targets: make([]string, 0, len(tg.Targets)),
|
||||||
|
Labels: tg.Labels,
|
||||||
|
}
|
||||||
|
for _, t := range tg.Targets {
|
||||||
|
g.Targets = append(g.Targets, string(t[clientmodel.AddressLabel]))
|
||||||
|
}
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaller interface.
|
||||||
|
func (tg *TargetGroup) UnmarshalJSON(b []byte) error {
|
||||||
|
g := struct {
|
||||||
|
Targets []string `yaml:"targets"`
|
||||||
|
Labels clientmodel.LabelSet `yaml:"labels"`
|
||||||
|
}{}
|
||||||
|
if err := json.Unmarshal(b, &g); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tg.Targets = make([]clientmodel.LabelSet, 0, len(g.Targets))
|
||||||
|
for _, t := range g.Targets {
|
||||||
|
if strings.Contains(t, "/") {
|
||||||
|
return fmt.Errorf("%q is not a valid hostname", t)
|
||||||
|
}
|
||||||
|
tg.Targets = append(tg.Targets, clientmodel.LabelSet{
|
||||||
|
clientmodel.AddressLabel: clientmodel.LabelValue(t),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
tg.Labels = g.Labels
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DNSSDConfig is the configuration for DNS based service discovery.
|
||||||
|
type DNSSDConfig struct {
|
||||||
|
// DefaultedDNSSDConfig contains the actual fields for DNSSDConfig.
|
||||||
|
DefaultedDNSSDConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *DNSSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedDNSSDConfig = DefaultDNSSDConfig
|
||||||
|
err := unmarshal(&c.DefaultedDNSSDConfig)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(c.Names) == 0 {
|
||||||
|
return fmt.Errorf("DNS-SD config must contain at least one SRV record name")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedDNSSDConfig is a proxy type for DNSSDConfig.
|
||||||
|
type DefaultedDNSSDConfig struct {
|
||||||
|
Names []string `yaml:"names"`
|
||||||
|
RefreshInterval Duration `yaml:"refresh_interval,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSDConfig is the configuration for file based discovery.
|
||||||
|
type FileSDConfig struct {
|
||||||
|
// DefaultedFileSDConfig contains the actual fields for FileSDConfig.
|
||||||
|
DefaultedFileSDConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *FileSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedFileSDConfig = DefaultFileSDConfig
|
||||||
|
err := unmarshal(&c.DefaultedFileSDConfig)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(c.Names) == 0 {
|
||||||
|
return fmt.Errorf("file service discovery config must contain at least one path name")
|
||||||
|
}
|
||||||
|
for _, name := range c.Names {
|
||||||
|
if !patFileSDName.MatchString(name) {
|
||||||
|
return fmt.Errorf("path name %q is not valid for file discovery", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedFileSDConfig is a proxy type for FileSDConfig.
|
||||||
|
type DefaultedFileSDConfig struct {
|
||||||
|
Names []string `yaml:"names"`
|
||||||
|
RefreshInterval Duration `yaml:"refresh_interval,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsulSDConfig is the configuration for Consul service discovery.
|
||||||
|
type ConsulSDConfig struct {
|
||||||
|
// DefaultedConsulSDConfig contains the actual fields for ConsulSDConfig.
|
||||||
|
DefaultedConsulSDConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *ConsulSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedConsulSDConfig = DefaultConsulSDConfig
|
||||||
|
err := unmarshal(&c.DefaultedConsulSDConfig)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(c.Server) == "" {
|
||||||
|
return fmt.Errorf("Consul SD configuration requires a server address")
|
||||||
|
}
|
||||||
|
if len(c.Services) == 0 {
|
||||||
|
return fmt.Errorf("Consul SD configuration requires at least one service name")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedConsulSDConfig is a proxy type for ConsulSDConfig.
|
||||||
|
type DefaultedConsulSDConfig struct {
|
||||||
|
Server string `yaml:"server"`
|
||||||
|
Token string `yaml:"token"`
|
||||||
|
Datacenter string `yaml:"datacenter"`
|
||||||
|
TagSeparator string `yaml:"tag_separator"`
|
||||||
|
Scheme string `yaml:"scheme"`
|
||||||
|
Username string `yaml:"username"`
|
||||||
|
Password string `yaml:"password"`
|
||||||
|
Services []string `yaml:"services"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RelabelAction is the action to be performed on relabeling.
|
||||||
|
type RelabelAction string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Performs a regex replacement.
|
||||||
|
RelabelReplace RelabelAction = "replace"
|
||||||
|
// Drops targets for which the input does not match the regex.
|
||||||
|
RelabelKeep = "keep"
|
||||||
|
// Drops targets for which the input does match the regex.
|
||||||
|
RelabelDrop = "drop"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (a *RelabelAction) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var s string
|
||||||
|
if err := unmarshal(&s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch act := RelabelAction(strings.ToLower(s)); act {
|
||||||
|
case RelabelReplace, RelabelKeep, RelabelDrop:
|
||||||
|
*a = act
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unknown relabel action %q", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RelabelConfig is the configuration for relabeling of target label sets.
|
||||||
|
type RelabelConfig struct {
|
||||||
|
// DefaultedRelabelConfig contains the actual fields for RelabelConfig.
|
||||||
|
DefaultedRelabelConfig `yaml:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
c.DefaultedRelabelConfig = DefaultRelabelConfig
|
||||||
|
return unmarshal(&c.DefaultedRelabelConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultedRelabelConfig is a proxy type for RelabelConfig.
|
||||||
|
type DefaultedRelabelConfig struct {
|
||||||
|
// A list of labels from which values are taken and concatenated
|
||||||
|
// with the configured separator in order.
|
||||||
|
SourceLabels clientmodel.LabelNames `yaml:"source_labels,flow"`
|
||||||
|
// Separator is the string between concatenated values from the source labels.
|
||||||
|
Separator string `yaml:"separator,omitempty"`
|
||||||
|
// Regex against which the concatenation is matched.
|
||||||
|
Regex *Regexp `yaml:"regex"`
|
||||||
|
// The label to which the resulting string is written in a replacement.
|
||||||
|
TargetLabel clientmodel.LabelName `yaml:"target_label,omitempty"`
|
||||||
|
// Replacement is the regex replacement pattern to be used.
|
||||||
|
Replacement string `yaml:"replacement,omitempty"`
|
||||||
|
// Action is the action to be performed for the relabeling.
|
||||||
|
Action RelabelAction `yaml:"action,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Regexp encapsulates a regexp.Regexp and makes it YAML marshallable.
|
||||||
|
type Regexp struct {
|
||||||
|
regexp.Regexp
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var s string
|
||||||
|
if err := unmarshal(&s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
regex, err := regexp.Compile(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
re.Regexp = *regex
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalYAML implements the yaml.Marshaller interface.
|
||||||
|
func (re Regexp) MarshalYAML() (interface{}, error) {
|
||||||
|
return re.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Duration encapsulates a time.Duration and makes it YAML marshallable.
|
||||||
|
//
|
||||||
|
// TODO(fabxc): Since we have custom types for most things, including timestamps,
|
||||||
|
// we might want to move this into our model as well, eventually.
|
||||||
|
type Duration time.Duration
|
||||||
|
|
||||||
|
// UnmarshalYAML implements the yaml.Unmarshaller interface.
|
||||||
|
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var s string
|
||||||
|
if err := unmarshal(&s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dur, err := utility.StringToDuration(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*d = Duration(dur)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalYAML implements the yaml.Marshaller interface.
|
||||||
|
func (d Duration) MarshalYAML() (interface{}, error) {
|
||||||
|
return utility.DurationToString(time.Duration(d)), nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,86 +0,0 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package io.prometheus;
|
|
||||||
|
|
||||||
// A label/value pair suitable for attaching to timeseries.
|
|
||||||
message LabelPair {
|
|
||||||
// The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*".
|
|
||||||
optional string name = 1;
|
|
||||||
// The value of the label. May contain any characters.
|
|
||||||
optional string value = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// A set of label/value pairs.
|
|
||||||
message LabelPairs {
|
|
||||||
repeated LabelPair label = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The global Prometheus configuration section.
|
|
||||||
message GlobalConfig {
|
|
||||||
// How frequently to scrape targets by default. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
optional string scrape_interval = 1 [default = "1m"];
|
|
||||||
// How frequently to evaluate rules by default. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
optional string evaluation_interval = 2 [default = "1m"];
|
|
||||||
// The labels to add to any timeseries that this Prometheus instance scrapes.
|
|
||||||
optional LabelPairs labels = 3;
|
|
||||||
// The list of file names of rule files to load.
|
|
||||||
repeated string rule_file = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
// A labeled group of targets to scrape for a job.
|
|
||||||
message TargetGroup {
|
|
||||||
// The list of endpoints to scrape via HTTP.
|
|
||||||
repeated string target = 1;
|
|
||||||
// The labels to add to any timeseries scraped for this target group.
|
|
||||||
optional LabelPairs labels = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// The configuration for a Prometheus job to scrape.
|
|
||||||
//
|
|
||||||
// The next field no. is 8.
|
|
||||||
message JobConfig {
|
|
||||||
// The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*".
|
|
||||||
required string name = 1;
|
|
||||||
// How frequently to scrape targets from this job. Overrides the global
|
|
||||||
// default. Must be a valid Prometheus duration string in the form
|
|
||||||
// "[0-9]+[smhdwy]".
|
|
||||||
optional string scrape_interval = 2;
|
|
||||||
// Per-target timeout when scraping this job. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
optional string scrape_timeout = 7 [default = "10s"];
|
|
||||||
// The DNS-SD service name pointing to SRV records containing endpoint
|
|
||||||
// information for a job. When this field is provided, no target_group
|
|
||||||
// elements may be set.
|
|
||||||
optional string sd_name = 3;
|
|
||||||
// Discovery refresh period when using DNS-SD to discover targets. Must be a
|
|
||||||
// valid Prometheus duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
optional string sd_refresh_interval = 4 [default = "30s"];
|
|
||||||
// List of labeled target groups for this job. Only legal when DNS-SD isn't
|
|
||||||
// used for a job.
|
|
||||||
repeated TargetGroup target_group = 5;
|
|
||||||
// The HTTP resource path to fetch metrics from on targets.
|
|
||||||
optional string metrics_path = 6 [default = "/metrics"];
|
|
||||||
}
|
|
||||||
|
|
||||||
// The top-level Prometheus configuration.
|
|
||||||
message PrometheusConfig {
|
|
||||||
// Global Prometheus configuration options. If omitted, an empty global
|
|
||||||
// configuration with default values (see GlobalConfig definition) will be
|
|
||||||
// created.
|
|
||||||
optional GlobalConfig global = 1;
|
|
||||||
// The list of jobs to scrape.
|
|
||||||
repeated JobConfig job = 2;
|
|
||||||
}
|
|
|
@ -1,84 +1,167 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"path"
|
"reflect"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
var fixturesPath = "fixtures"
|
var expectedConf = &Config{DefaultedConfig{
|
||||||
|
GlobalConfig: &GlobalConfig{DefaultedGlobalConfig{
|
||||||
|
ScrapeInterval: Duration(15 * time.Second),
|
||||||
|
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
|
||||||
|
EvaluationInterval: Duration(30 * time.Second),
|
||||||
|
|
||||||
var configTests = []struct {
|
Labels: clientmodel.LabelSet{
|
||||||
inputFile string
|
"monitor": "codelab",
|
||||||
shouldFail bool
|
"foo": "bar",
|
||||||
errContains string
|
},
|
||||||
|
}},
|
||||||
|
|
||||||
|
RuleFiles: []string{
|
||||||
|
"first.rules",
|
||||||
|
"second.rules",
|
||||||
|
},
|
||||||
|
|
||||||
|
ScrapeConfigs: []*ScrapeConfig{
|
||||||
|
{DefaultedScrapeConfig{
|
||||||
|
JobName: "prometheus",
|
||||||
|
|
||||||
|
ScrapeInterval: Duration(15 * time.Second),
|
||||||
|
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
|
||||||
|
|
||||||
|
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||||
|
Scheme: DefaultScrapeConfig.Scheme,
|
||||||
|
|
||||||
|
TargetGroups: []*TargetGroup{
|
||||||
|
{
|
||||||
|
Targets: []clientmodel.LabelSet{
|
||||||
|
{clientmodel.AddressLabel: "localhost:9090"},
|
||||||
|
{clientmodel.AddressLabel: "localhost:9191"},
|
||||||
|
},
|
||||||
|
Labels: clientmodel.LabelSet{
|
||||||
|
"my": "label",
|
||||||
|
"your": "label",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
FileSDConfigs: []*FileSDConfig{
|
||||||
|
{DefaultedFileSDConfig{
|
||||||
|
Names: []string{"foo/*.slow.json", "foo/*.slow.yml", "single/file.yml"},
|
||||||
|
RefreshInterval: Duration(10 * time.Minute),
|
||||||
|
}},
|
||||||
|
{DefaultedFileSDConfig{
|
||||||
|
Names: []string{"bar/*.yaml"},
|
||||||
|
RefreshInterval: Duration(30 * time.Second),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
|
||||||
|
RelabelConfigs: []*RelabelConfig{
|
||||||
|
{DefaultedRelabelConfig{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"job", "__meta_dns_srv_name"},
|
||||||
|
TargetLabel: "job",
|
||||||
|
Separator: ";",
|
||||||
|
Regex: &Regexp{*regexp.MustCompile("(.*)some-[regex]$")},
|
||||||
|
Replacement: "foo-${1}",
|
||||||
|
Action: RelabelReplace,
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
{DefaultedScrapeConfig{
|
||||||
|
JobName: "service-x",
|
||||||
|
|
||||||
|
ScrapeInterval: Duration(50 * time.Second),
|
||||||
|
ScrapeTimeout: Duration(5 * time.Second),
|
||||||
|
|
||||||
|
BasicAuth: &BasicAuth{
|
||||||
|
Username: "admin",
|
||||||
|
Password: "password",
|
||||||
|
},
|
||||||
|
MetricsPath: "/my_path",
|
||||||
|
Scheme: "https",
|
||||||
|
|
||||||
|
DNSSDConfigs: []*DNSSDConfig{
|
||||||
|
{DefaultedDNSSDConfig{
|
||||||
|
Names: []string{
|
||||||
|
"first.dns.address.domain.com",
|
||||||
|
"second.dns.address.domain.com",
|
||||||
|
},
|
||||||
|
RefreshInterval: Duration(15 * time.Second),
|
||||||
|
}},
|
||||||
|
{DefaultedDNSSDConfig{
|
||||||
|
Names: []string{
|
||||||
|
"first.dns.address.domain.com",
|
||||||
|
},
|
||||||
|
RefreshInterval: Duration(30 * time.Second),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
|
||||||
|
RelabelConfigs: []*RelabelConfig{
|
||||||
|
{DefaultedRelabelConfig{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"job"},
|
||||||
|
Regex: &Regexp{*regexp.MustCompile("(.*)some-[regex]$")},
|
||||||
|
Separator: ";",
|
||||||
|
Action: RelabelDrop,
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}, ""}
|
||||||
|
|
||||||
|
func TestLoadConfig(t *testing.T) {
|
||||||
|
c, err := LoadFromFile("testdata/conf.good.yml")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error parsing %s: %s", "testdata/conf.good.yml", err)
|
||||||
|
}
|
||||||
|
bgot, err := yaml.Marshal(c)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s", err)
|
||||||
|
}
|
||||||
|
bexp, err := yaml.Marshal(expectedConf)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s", err)
|
||||||
|
}
|
||||||
|
expectedConf.original = c.original
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(c, expectedConf) {
|
||||||
|
t.Errorf("%s: unexpected config result: \n\n%s\n expected\n\n%s", "testdata/conf.good.yml", bgot, bexp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var expectedErrors = []struct {
|
||||||
|
filename string
|
||||||
|
errMsg string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
inputFile: "minimal.conf.input",
|
filename: "jobname.bad.yml",
|
||||||
|
errMsg: `"prom^etheus" is not a valid job name`,
|
||||||
}, {
|
}, {
|
||||||
inputFile: "sample.conf.input",
|
filename: "jobname_dup.bad.yml",
|
||||||
|
errMsg: `found multiple scrape configs with job name "prometheus"`,
|
||||||
}, {
|
}, {
|
||||||
inputFile: "empty.conf.input",
|
filename: "labelname.bad.yml",
|
||||||
|
errMsg: `"not$allowed" is not a valid label name`,
|
||||||
}, {
|
}, {
|
||||||
inputFile: "sd_targets.conf.input",
|
filename: "regex.bad.yml",
|
||||||
},
|
errMsg: "error parsing regexp",
|
||||||
{
|
|
||||||
inputFile: "invalid_proto_format.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "unknown field name",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
inputFile: "invalid_scrape_interval.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "invalid global scrape interval",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
inputFile: "invalid_job_name.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "invalid job name",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
inputFile: "invalid_label_name.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "invalid label name",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
inputFile: "mixing_sd_and_manual_targets.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "specified both DNS-SD name and target group",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
inputFile: "repeated_job_name.conf.input",
|
|
||||||
shouldFail: true,
|
|
||||||
errContains: "found multiple jobs configured with the same name: 'testjob1'",
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConfigs(t *testing.T) {
|
func TestBadConfigs(t *testing.T) {
|
||||||
for i, configTest := range configTests {
|
for _, ee := range expectedErrors {
|
||||||
_, err := LoadFromFile(path.Join(fixturesPath, configTest.inputFile))
|
_, err := LoadFromFile("testdata/" + ee.filename)
|
||||||
|
if err == nil {
|
||||||
if err != nil {
|
t.Errorf("Expected error parsing %s but got none", ee.filename)
|
||||||
if !configTest.shouldFail {
|
|
||||||
t.Fatalf("%d. Error parsing config %v: %v", i, configTest.inputFile, err)
|
|
||||||
} else {
|
|
||||||
if !strings.Contains(err.Error(), configTest.errContains) {
|
|
||||||
t.Fatalf("%d. Expected error containing '%v', got: %v", i, configTest.errContains, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
if !strings.Contains(err.Error(), ee.errMsg) {
|
||||||
|
t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,3 +0,0 @@
|
||||||
job: <
|
|
||||||
name: "1testjob"
|
|
||||||
>
|
|
|
@ -1,10 +0,0 @@
|
||||||
global <
|
|
||||||
scrape_interval: "30s"
|
|
||||||
evaluation_interval: "30s"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "monitor-test"
|
|
||||||
value: "test"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,11 +0,0 @@
|
||||||
global <
|
|
||||||
scrape_interval: "30s"
|
|
||||||
evaluation_interval: "30s"
|
|
||||||
unknown_field: "foo"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "monitor"
|
|
||||||
value: "test"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,10 +0,0 @@
|
||||||
global <
|
|
||||||
scrape_interval: "30"
|
|
||||||
evaluation_interval: "30s"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "monitor"
|
|
||||||
value: "test"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,20 +0,0 @@
|
||||||
global <
|
|
||||||
scrape_interval: "30s"
|
|
||||||
evaluation_interval: "30s"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "monitor"
|
|
||||||
value: "test"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
rule_file: "prometheus.rules"
|
|
||||||
>
|
|
||||||
|
|
||||||
job: <
|
|
||||||
name: "prometheus"
|
|
||||||
scrape_interval: "15s"
|
|
||||||
|
|
||||||
target_group: <
|
|
||||||
target: "http://localhost:9090/metrics.json"
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,7 +0,0 @@
|
||||||
job: <
|
|
||||||
name: "testjob"
|
|
||||||
sd_name: "sd_name"
|
|
||||||
target_group: <
|
|
||||||
target: "http://sampletarget:8080/metrics.json"
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,11 +0,0 @@
|
||||||
job: <
|
|
||||||
name: "testjob1"
|
|
||||||
>
|
|
||||||
|
|
||||||
job: <
|
|
||||||
name: "testjob2"
|
|
||||||
>
|
|
||||||
|
|
||||||
job: <
|
|
||||||
name: "testjob1"
|
|
||||||
>
|
|
|
@ -1,55 +0,0 @@
|
||||||
global <
|
|
||||||
scrape_interval: "30s"
|
|
||||||
evaluation_interval: "30s"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "monitor"
|
|
||||||
value: "test"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
rule_file: "prometheus.rules"
|
|
||||||
>
|
|
||||||
|
|
||||||
job: <
|
|
||||||
name: "prometheus"
|
|
||||||
scrape_interval: "15s"
|
|
||||||
|
|
||||||
target_group: <
|
|
||||||
target: "http://localhost:9090/metrics.json"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "group"
|
|
||||||
value: "canary"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
||||||
|
|
||||||
job: <
|
|
||||||
name: "random"
|
|
||||||
scrape_interval: "30s"
|
|
||||||
|
|
||||||
target_group: <
|
|
||||||
target: "http://random.com:8080/metrics.json"
|
|
||||||
target: "http://random.com:8081/metrics.json"
|
|
||||||
target: "http://random.com:8082/metrics.json"
|
|
||||||
target: "http://random.com:8083/metrics.json"
|
|
||||||
target: "http://random.com:8084/metrics.json"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "group"
|
|
||||||
value: "production"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
||||||
target_group: <
|
|
||||||
target: "http://random.com:8085/metrics.json"
|
|
||||||
target: "http://random.com:8086/metrics.json"
|
|
||||||
labels: <
|
|
||||||
label: <
|
|
||||||
name: "group"
|
|
||||||
value: "canary"
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
||||||
>
|
|
|
@ -1,4 +0,0 @@
|
||||||
job: <
|
|
||||||
name: "testjob"
|
|
||||||
sd_name: "sd_name"
|
|
||||||
>
|
|
|
@ -1,264 +0,0 @@
|
||||||
// Code generated by protoc-gen-go.
|
|
||||||
// source: config.proto
|
|
||||||
// DO NOT EDIT!
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package io_prometheus is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
config.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
LabelPair
|
|
||||||
LabelPairs
|
|
||||||
GlobalConfig
|
|
||||||
TargetGroup
|
|
||||||
JobConfig
|
|
||||||
PrometheusConfig
|
|
||||||
*/
|
|
||||||
package io_prometheus
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import math "math"
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// A label/value pair suitable for attaching to timeseries.
|
|
||||||
type LabelPair struct {
|
|
||||||
// The name of the label. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_]*".
|
|
||||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
|
||||||
// The value of the label. May contain any characters.
|
|
||||||
Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LabelPair) Reset() { *m = LabelPair{} }
|
|
||||||
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*LabelPair) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *LabelPair) GetName() string {
|
|
||||||
if m != nil && m.Name != nil {
|
|
||||||
return *m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LabelPair) GetValue() string {
|
|
||||||
if m != nil && m.Value != nil {
|
|
||||||
return *m.Value
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// A set of label/value pairs.
|
|
||||||
type LabelPairs struct {
|
|
||||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LabelPairs) Reset() { *m = LabelPairs{} }
|
|
||||||
func (m *LabelPairs) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*LabelPairs) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *LabelPairs) GetLabel() []*LabelPair {
|
|
||||||
if m != nil {
|
|
||||||
return m.Label
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The global Prometheus configuration section.
|
|
||||||
type GlobalConfig struct {
|
|
||||||
// How frequently to scrape targets by default. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
ScrapeInterval *string `protobuf:"bytes,1,opt,name=scrape_interval,def=1m" json:"scrape_interval,omitempty"`
|
|
||||||
// How frequently to evaluate rules by default. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
EvaluationInterval *string `protobuf:"bytes,2,opt,name=evaluation_interval,def=1m" json:"evaluation_interval,omitempty"`
|
|
||||||
// The labels to add to any timeseries that this Prometheus instance scrapes.
|
|
||||||
Labels *LabelPairs `protobuf:"bytes,3,opt,name=labels" json:"labels,omitempty"`
|
|
||||||
// The list of file names of rule files to load.
|
|
||||||
RuleFile []string `protobuf:"bytes,4,rep,name=rule_file" json:"rule_file,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GlobalConfig) Reset() { *m = GlobalConfig{} }
|
|
||||||
func (m *GlobalConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*GlobalConfig) ProtoMessage() {}
|
|
||||||
|
|
||||||
const Default_GlobalConfig_ScrapeInterval string = "1m"
|
|
||||||
const Default_GlobalConfig_EvaluationInterval string = "1m"
|
|
||||||
|
|
||||||
func (m *GlobalConfig) GetScrapeInterval() string {
|
|
||||||
if m != nil && m.ScrapeInterval != nil {
|
|
||||||
return *m.ScrapeInterval
|
|
||||||
}
|
|
||||||
return Default_GlobalConfig_ScrapeInterval
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GlobalConfig) GetEvaluationInterval() string {
|
|
||||||
if m != nil && m.EvaluationInterval != nil {
|
|
||||||
return *m.EvaluationInterval
|
|
||||||
}
|
|
||||||
return Default_GlobalConfig_EvaluationInterval
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GlobalConfig) GetLabels() *LabelPairs {
|
|
||||||
if m != nil {
|
|
||||||
return m.Labels
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *GlobalConfig) GetRuleFile() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.RuleFile
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A labeled group of targets to scrape for a job.
|
|
||||||
type TargetGroup struct {
|
|
||||||
// The list of endpoints to scrape via HTTP.
|
|
||||||
Target []string `protobuf:"bytes,1,rep,name=target" json:"target,omitempty"`
|
|
||||||
// The labels to add to any timeseries scraped for this target group.
|
|
||||||
Labels *LabelPairs `protobuf:"bytes,2,opt,name=labels" json:"labels,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TargetGroup) Reset() { *m = TargetGroup{} }
|
|
||||||
func (m *TargetGroup) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*TargetGroup) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *TargetGroup) GetTarget() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Target
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TargetGroup) GetLabels() *LabelPairs {
|
|
||||||
if m != nil {
|
|
||||||
return m.Labels
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The configuration for a Prometheus job to scrape.
|
|
||||||
//
|
|
||||||
// The next field no. is 8.
|
|
||||||
type JobConfig struct {
|
|
||||||
// The job name. Must adhere to the regex "[a-zA-Z_][a-zA-Z0-9_-]*".
|
|
||||||
Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
|
|
||||||
// How frequently to scrape targets from this job. Overrides the global
|
|
||||||
// default. Must be a valid Prometheus duration string in the form
|
|
||||||
// "[0-9]+[smhdwy]".
|
|
||||||
ScrapeInterval *string `protobuf:"bytes,2,opt,name=scrape_interval" json:"scrape_interval,omitempty"`
|
|
||||||
// Per-target timeout when scraping this job. Must be a valid Prometheus
|
|
||||||
// duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
ScrapeTimeout *string `protobuf:"bytes,7,opt,name=scrape_timeout,def=10s" json:"scrape_timeout,omitempty"`
|
|
||||||
// The DNS-SD service name pointing to SRV records containing endpoint
|
|
||||||
// information for a job. When this field is provided, no target_group
|
|
||||||
// elements may be set.
|
|
||||||
SdName *string `protobuf:"bytes,3,opt,name=sd_name" json:"sd_name,omitempty"`
|
|
||||||
// Discovery refresh period when using DNS-SD to discover targets. Must be a
|
|
||||||
// valid Prometheus duration string in the form "[0-9]+[smhdwy]".
|
|
||||||
SdRefreshInterval *string `protobuf:"bytes,4,opt,name=sd_refresh_interval,def=30s" json:"sd_refresh_interval,omitempty"`
|
|
||||||
// List of labeled target groups for this job. Only legal when DNS-SD isn't
|
|
||||||
// used for a job.
|
|
||||||
TargetGroup []*TargetGroup `protobuf:"bytes,5,rep,name=target_group" json:"target_group,omitempty"`
|
|
||||||
// The HTTP resource path to fetch metrics from on targets.
|
|
||||||
MetricsPath *string `protobuf:"bytes,6,opt,name=metrics_path,def=/metrics" json:"metrics_path,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) Reset() { *m = JobConfig{} }
|
|
||||||
func (m *JobConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*JobConfig) ProtoMessage() {}
|
|
||||||
|
|
||||||
const Default_JobConfig_ScrapeTimeout string = "10s"
|
|
||||||
const Default_JobConfig_SdRefreshInterval string = "30s"
|
|
||||||
const Default_JobConfig_MetricsPath string = "/metrics"
|
|
||||||
|
|
||||||
func (m *JobConfig) GetName() string {
|
|
||||||
if m != nil && m.Name != nil {
|
|
||||||
return *m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetScrapeInterval() string {
|
|
||||||
if m != nil && m.ScrapeInterval != nil {
|
|
||||||
return *m.ScrapeInterval
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetScrapeTimeout() string {
|
|
||||||
if m != nil && m.ScrapeTimeout != nil {
|
|
||||||
return *m.ScrapeTimeout
|
|
||||||
}
|
|
||||||
return Default_JobConfig_ScrapeTimeout
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetSdName() string {
|
|
||||||
if m != nil && m.SdName != nil {
|
|
||||||
return *m.SdName
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetSdRefreshInterval() string {
|
|
||||||
if m != nil && m.SdRefreshInterval != nil {
|
|
||||||
return *m.SdRefreshInterval
|
|
||||||
}
|
|
||||||
return Default_JobConfig_SdRefreshInterval
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetTargetGroup() []*TargetGroup {
|
|
||||||
if m != nil {
|
|
||||||
return m.TargetGroup
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *JobConfig) GetMetricsPath() string {
|
|
||||||
if m != nil && m.MetricsPath != nil {
|
|
||||||
return *m.MetricsPath
|
|
||||||
}
|
|
||||||
return Default_JobConfig_MetricsPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// The top-level Prometheus configuration.
|
|
||||||
type PrometheusConfig struct {
|
|
||||||
// Global Prometheus configuration options. If omitted, an empty global
|
|
||||||
// configuration with default values (see GlobalConfig definition) will be
|
|
||||||
// created.
|
|
||||||
Global *GlobalConfig `protobuf:"bytes,1,opt,name=global" json:"global,omitempty"`
|
|
||||||
// The list of jobs to scrape.
|
|
||||||
Job []*JobConfig `protobuf:"bytes,2,rep,name=job" json:"job,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *PrometheusConfig) Reset() { *m = PrometheusConfig{} }
|
|
||||||
func (m *PrometheusConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*PrometheusConfig) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *PrometheusConfig) GetGlobal() *GlobalConfig {
|
|
||||||
if m != nil {
|
|
||||||
return m.Global
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *PrometheusConfig) GetJob() []*JobConfig {
|
|
||||||
if m != nil {
|
|
||||||
return m.Job
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
|
@ -1,53 +0,0 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package config
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
pb "github.com/prometheus/prometheus/config/generated"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LoadFromString returns a config parsed from the provided string.
|
|
||||||
func LoadFromString(configStr string) (Config, error) {
|
|
||||||
configProto := pb.PrometheusConfig{}
|
|
||||||
if err := proto.UnmarshalText(configStr, &configProto); err != nil {
|
|
||||||
return Config{}, err
|
|
||||||
}
|
|
||||||
if configProto.Global == nil {
|
|
||||||
configProto.Global = &pb.GlobalConfig{}
|
|
||||||
}
|
|
||||||
for _, job := range configProto.Job {
|
|
||||||
if job.ScrapeInterval == nil {
|
|
||||||
job.ScrapeInterval = proto.String(configProto.Global.GetScrapeInterval())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
config := Config{configProto}
|
|
||||||
err := config.Validate()
|
|
||||||
|
|
||||||
return config, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadFromFile returns a config parsed from the file of the provided name.
|
|
||||||
func LoadFromFile(fileName string) (Config, error) {
|
|
||||||
configStr, err := ioutil.ReadFile(fileName)
|
|
||||||
if err != nil {
|
|
||||||
return Config{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return LoadFromString(string(configStr))
|
|
||||||
}
|
|
|
@ -1,25 +0,0 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package config
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLoadFromFile(t *testing.T) {
|
|
||||||
_, err := LoadFromFile("file-does-not-exist.conf")
|
|
||||||
if err == nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
74
config/testdata/conf.good.yml
vendored
Normal file
74
config/testdata/conf.good.yml
vendored
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
# my global config
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s
|
||||||
|
evaluation_interval: 30s
|
||||||
|
# scrape_timeout is set to the global default (10s).
|
||||||
|
|
||||||
|
labels:
|
||||||
|
monitor: codelab
|
||||||
|
foo: bar
|
||||||
|
|
||||||
|
rule_files:
|
||||||
|
- "first.rules"
|
||||||
|
- "second.rules"
|
||||||
|
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
|
||||||
|
# scrape_interval is defined by the configured global (15s).
|
||||||
|
# scrape_timeout is defined by the global default (10s).
|
||||||
|
|
||||||
|
# metrics_path defaults to '/metrics'
|
||||||
|
# scheme defaults to 'http'.
|
||||||
|
|
||||||
|
labels:
|
||||||
|
foo: baz
|
||||||
|
|
||||||
|
file_sd_configs:
|
||||||
|
- names:
|
||||||
|
- foo/*.slow.json
|
||||||
|
- foo/*.slow.yml
|
||||||
|
- single/file.yml
|
||||||
|
refresh_interval: 10m
|
||||||
|
- names:
|
||||||
|
- bar/*.yaml
|
||||||
|
|
||||||
|
target_groups:
|
||||||
|
- targets: ['localhost:9090', 'localhost:9191']
|
||||||
|
labels:
|
||||||
|
my: label
|
||||||
|
your: label
|
||||||
|
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [job, __meta_dns_srv_name]
|
||||||
|
regex: (.*)some-[regex]$
|
||||||
|
target_label: job
|
||||||
|
replacement: foo-${1}
|
||||||
|
# action defaults to 'replace'
|
||||||
|
|
||||||
|
|
||||||
|
- job_name: service-x
|
||||||
|
|
||||||
|
basic_auth:
|
||||||
|
username: admin
|
||||||
|
password: password
|
||||||
|
|
||||||
|
scrape_interval: 50s
|
||||||
|
scrape_timeout: 5s
|
||||||
|
|
||||||
|
metrics_path: /my_path
|
||||||
|
scheme: https
|
||||||
|
|
||||||
|
dns_sd_configs:
|
||||||
|
- refresh_interval: 15s
|
||||||
|
names:
|
||||||
|
- first.dns.address.domain.com
|
||||||
|
- second.dns.address.domain.com
|
||||||
|
- names:
|
||||||
|
- first.dns.address.domain.com
|
||||||
|
# refresh_interval defaults to 30s.
|
||||||
|
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [job]
|
||||||
|
regex: (.*)some-[regex]$
|
||||||
|
action: drop
|
2
config/testdata/jobname.bad.yml
vendored
Normal file
2
config/testdata/jobname.bad.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prom^etheus
|
5
config/testdata/jobname_dup.bad.yml
vendored
Normal file
5
config/testdata/jobname_dup.bad.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
# Two scrape configs with the same job names are not allowed.
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
- job_name: service-x
|
||||||
|
- job_name: prometheus
|
3
config/testdata/labelname.bad.yml
vendored
Normal file
3
config/testdata/labelname.bad.yml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
global:
|
||||||
|
labels:
|
||||||
|
not$allowed: value
|
4
config/testdata/regex.bad.yml
vendored
Normal file
4
config/testdata/regex.bad.yml
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
relabel_configs:
|
||||||
|
- regex: abc(def
|
|
@ -1,30 +0,0 @@
|
||||||
# Global default settings.
|
|
||||||
global {
|
|
||||||
scrape_interval: "15s" # By default, scrape targets every 15 seconds.
|
|
||||||
evaluation_interval: "15s" # By default, evaluate rules every 15 seconds.
|
|
||||||
|
|
||||||
# Attach these extra labels to all timeseries collected by this Prometheus instance.
|
|
||||||
labels: {
|
|
||||||
label: {
|
|
||||||
name: "monitor"
|
|
||||||
value: "codelab-monitor"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Load and evaluate rules in this file every 'evaluation_interval' seconds. This field may be repeated.
|
|
||||||
#rule_file: "prometheus.rules"
|
|
||||||
}
|
|
||||||
|
|
||||||
# A job definition containing exactly one endpoint to scrape: Here it's prometheus itself.
|
|
||||||
job: {
|
|
||||||
# The job name is added as a label `job={job-name}` to any timeseries scraped from this job.
|
|
||||||
name: "prometheus"
|
|
||||||
# Override the global default and scrape targets from this job every 5 seconds.
|
|
||||||
scrape_interval: "5s"
|
|
||||||
|
|
||||||
# Let's define a group of targets to scrape for this job. In this case, only one.
|
|
||||||
target_group: {
|
|
||||||
# These endpoints are scraped via HTTP.
|
|
||||||
target: "http://localhost:9090/metrics"
|
|
||||||
}
|
|
||||||
}
|
|
30
documentation/examples/prometheus.yml
Normal file
30
documentation/examples/prometheus.yml
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
# my global config
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s # By default, scrape targets every 15 seconds.
|
||||||
|
evaluation_interval: 15s # By default, scrape targets every 15 seconds.
|
||||||
|
# scrape_timeout is set to the global default (10s).
|
||||||
|
|
||||||
|
# Attach these extra labels to all timeseries collected by this Prometheus instance.
|
||||||
|
labels:
|
||||||
|
monitor: 'codelab-monitor'
|
||||||
|
|
||||||
|
# Load and evaluate rules in this file every 'evaluation_interval' seconds.
|
||||||
|
rule_files:
|
||||||
|
# - "first.rules"
|
||||||
|
# - "second.rules"
|
||||||
|
|
||||||
|
# A scrape configuration containing exactly one endpoint to scrape:
|
||||||
|
# Here it's Prometheus itself.
|
||||||
|
scrape_configs:
|
||||||
|
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
|
||||||
|
- job_name: 'prometheus'
|
||||||
|
|
||||||
|
# Override the global default and scrape targets from this job every 5 seconds.
|
||||||
|
scrape_interval: 5s
|
||||||
|
scrape_timeout: 10s
|
||||||
|
|
||||||
|
# metrics_path defaults to '/metrics'
|
||||||
|
# scheme defaults to 'http'.
|
||||||
|
|
||||||
|
target_groups:
|
||||||
|
- targets: ['localhost:9090']
|
97
main.go
97
main.go
|
@ -79,7 +79,7 @@ var (
|
||||||
type prometheus struct {
|
type prometheus struct {
|
||||||
queryEngine *promql.Engine
|
queryEngine *promql.Engine
|
||||||
ruleManager *rules.Manager
|
ruleManager *rules.Manager
|
||||||
targetManager retrieval.TargetManager
|
targetManager *retrieval.TargetManager
|
||||||
notificationHandler *notification.NotificationHandler
|
notificationHandler *notification.NotificationHandler
|
||||||
storage local.Storage
|
storage local.Storage
|
||||||
remoteStorageQueues []*remote.StorageQueueManager
|
remoteStorageQueues []*remote.StorageQueueManager
|
||||||
|
@ -92,12 +92,6 @@ type prometheus struct {
|
||||||
// NewPrometheus creates a new prometheus object based on flag values.
|
// NewPrometheus creates a new prometheus object based on flag values.
|
||||||
// Call Serve() to start serving and Close() for clean shutdown.
|
// Call Serve() to start serving and Close() for clean shutdown.
|
||||||
func NewPrometheus() *prometheus {
|
func NewPrometheus() *prometheus {
|
||||||
conf, err := config.LoadFromFile(*configFile)
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("Couldn't load configuration (-config.file=%s): %v\n", *configFile, err)
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
notificationHandler := notification.NewNotificationHandler(*alertmanagerURL, *notificationQueueCapacity)
|
notificationHandler := notification.NewNotificationHandler(*alertmanagerURL, *notificationQueueCapacity)
|
||||||
|
|
||||||
var syncStrategy local.SyncStrategy
|
var syncStrategy local.SyncStrategy
|
||||||
|
@ -124,11 +118,7 @@ func NewPrometheus() *prometheus {
|
||||||
PedanticChecks: *storagePedanticChecks,
|
PedanticChecks: *storagePedanticChecks,
|
||||||
SyncStrategy: syncStrategy,
|
SyncStrategy: syncStrategy,
|
||||||
}
|
}
|
||||||
memStorage, err := local.NewMemorySeriesStorage(o)
|
memStorage := local.NewMemorySeriesStorage(o)
|
||||||
if err != nil {
|
|
||||||
glog.Error("Error opening memory series storage: ", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
var sampleAppender storage.SampleAppender
|
var sampleAppender storage.SampleAppender
|
||||||
var remoteStorageQueues []*remote.StorageQueueManager
|
var remoteStorageQueues []*remote.StorageQueueManager
|
||||||
|
@ -154,23 +144,17 @@ func NewPrometheus() *prometheus {
|
||||||
sampleAppender = fanout
|
sampleAppender = fanout
|
||||||
}
|
}
|
||||||
|
|
||||||
targetManager := retrieval.NewTargetManager(sampleAppender, conf.GlobalLabels())
|
targetManager := retrieval.NewTargetManager(sampleAppender)
|
||||||
targetManager.AddTargetsFromConfig(conf)
|
|
||||||
|
|
||||||
queryEngine := promql.NewEngine(memStorage)
|
queryEngine := promql.NewEngine(memStorage)
|
||||||
|
|
||||||
ruleManager := rules.NewManager(&rules.ManagerOptions{
|
ruleManager := rules.NewManager(&rules.ManagerOptions{
|
||||||
SampleAppender: sampleAppender,
|
SampleAppender: sampleAppender,
|
||||||
NotificationHandler: notificationHandler,
|
NotificationHandler: notificationHandler,
|
||||||
EvaluationInterval: conf.EvaluationInterval(),
|
|
||||||
QueryEngine: queryEngine,
|
QueryEngine: queryEngine,
|
||||||
PrometheusURL: web.MustBuildServerURL(*pathPrefix),
|
PrometheusURL: web.MustBuildServerURL(*pathPrefix),
|
||||||
PathPrefix: *pathPrefix,
|
PathPrefix: *pathPrefix,
|
||||||
})
|
})
|
||||||
if err := ruleManager.LoadRuleFiles(conf.Global.GetRuleFile()...); err != nil {
|
|
||||||
glog.Errorf("Error loading rule files: %s", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
flags := map[string]string{}
|
flags := map[string]string{}
|
||||||
flag.VisitAll(func(f *flag.Flag) {
|
flag.VisitAll(func(f *flag.Flag) {
|
||||||
|
@ -178,9 +162,8 @@ func NewPrometheus() *prometheus {
|
||||||
})
|
})
|
||||||
prometheusStatus := &web.PrometheusStatusHandler{
|
prometheusStatus := &web.PrometheusStatusHandler{
|
||||||
BuildInfo: BuildInfo,
|
BuildInfo: BuildInfo,
|
||||||
Config: conf.String(),
|
|
||||||
RuleManager: ruleManager,
|
RuleManager: ruleManager,
|
||||||
TargetPools: targetManager.Pools(),
|
TargetPools: targetManager.Pools,
|
||||||
Flags: flags,
|
Flags: flags,
|
||||||
Birth: time.Now(),
|
Birth: time.Now(),
|
||||||
PathPrefix: *pathPrefix,
|
PathPrefix: *pathPrefix,
|
||||||
|
@ -225,50 +208,83 @@ func NewPrometheus() *prometheus {
|
||||||
webService: webService,
|
webService: webService,
|
||||||
}
|
}
|
||||||
webService.QuitChan = make(chan struct{})
|
webService.QuitChan = make(chan struct{})
|
||||||
|
|
||||||
|
if !p.reloadConfig() {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *prometheus) reloadConfig() bool {
|
||||||
|
glog.Infof("Loading configuration file %s", *configFile)
|
||||||
|
|
||||||
|
conf, err := config.LoadFromFile(*configFile)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Couldn't load configuration (-config.file=%s): %v", *configFile, err)
|
||||||
|
glog.Errorf("Note: The configuration format has changed with version 0.14, please check the documentation.")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
p.webService.StatusHandler.ApplyConfig(conf)
|
||||||
|
p.targetManager.ApplyConfig(conf)
|
||||||
|
p.ruleManager.ApplyConfig(conf)
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// Serve starts the Prometheus server. It returns after the server has been shut
|
// Serve starts the Prometheus server. It returns after the server has been shut
|
||||||
// down. The method installs an interrupt handler, allowing to trigger a
|
// down. The method installs an interrupt handler, allowing to trigger a
|
||||||
// shutdown by sending SIGTERM to the process.
|
// shutdown by sending SIGTERM to the process.
|
||||||
func (p *prometheus) Serve() {
|
func (p *prometheus) Serve() {
|
||||||
|
// Start all components.
|
||||||
|
if err := p.storage.Start(); err != nil {
|
||||||
|
glog.Error("Error opening memory series storage: ", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer p.storage.Stop()
|
||||||
|
|
||||||
|
// The storage has to be fully initialized before registering Prometheus.
|
||||||
|
registry.MustRegister(p)
|
||||||
|
|
||||||
for _, q := range p.remoteStorageQueues {
|
for _, q := range p.remoteStorageQueues {
|
||||||
go q.Run()
|
go q.Run()
|
||||||
|
defer q.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
go p.ruleManager.Run()
|
go p.ruleManager.Run()
|
||||||
|
defer p.ruleManager.Stop()
|
||||||
|
|
||||||
go p.notificationHandler.Run()
|
go p.notificationHandler.Run()
|
||||||
|
defer p.notificationHandler.Stop()
|
||||||
|
|
||||||
p.storage.Start()
|
go p.targetManager.Run()
|
||||||
|
defer p.targetManager.Stop()
|
||||||
|
|
||||||
|
defer p.queryEngine.Stop()
|
||||||
|
|
||||||
|
go p.webService.ServeForever(*pathPrefix)
|
||||||
|
|
||||||
|
// Wait for reload or termination signals.
|
||||||
|
hup := make(chan os.Signal)
|
||||||
|
signal.Notify(hup, syscall.SIGHUP)
|
||||||
go func() {
|
go func() {
|
||||||
err := p.webService.ServeForever(*pathPrefix)
|
for range hup {
|
||||||
if err != nil {
|
p.reloadConfig()
|
||||||
glog.Fatal(err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
notifier := make(chan os.Signal)
|
term := make(chan os.Signal)
|
||||||
signal.Notify(notifier, os.Interrupt, syscall.SIGTERM)
|
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
|
||||||
select {
|
select {
|
||||||
case <-notifier:
|
case <-term:
|
||||||
glog.Warning("Received SIGTERM, exiting gracefully...")
|
glog.Warning("Received SIGTERM, exiting gracefully...")
|
||||||
case <-p.webService.QuitChan:
|
case <-p.webService.QuitChan:
|
||||||
glog.Warning("Received termination request via web service, exiting gracefully...")
|
glog.Warning("Received termination request via web service, exiting gracefully...")
|
||||||
}
|
}
|
||||||
|
|
||||||
p.targetManager.Stop()
|
close(hup)
|
||||||
p.ruleManager.Stop()
|
|
||||||
p.queryEngine.Stop()
|
|
||||||
|
|
||||||
if err := p.storage.Stop(); err != nil {
|
|
||||||
glog.Error("Error stopping local storage: ", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, q := range p.remoteStorageQueues {
|
|
||||||
q.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
p.notificationHandler.Stop()
|
|
||||||
glog.Info("See you next time!")
|
glog.Info("See you next time!")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -371,6 +387,5 @@ func main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
p := NewPrometheus()
|
p := NewPrometheus()
|
||||||
registry.MustRegister(p)
|
|
||||||
p.Serve()
|
p.Serve()
|
||||||
}
|
}
|
||||||
|
|
266
retrieval/discovery/consul.go
Normal file
266
retrieval/discovery/consul.go
Normal file
|
@ -0,0 +1,266 @@
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
|
||||||
|
consul "github.com/hashicorp/consul/api"
|
||||||
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
consulSourcePrefix = "consul"
|
||||||
|
consulWatchTimeout = 30 * time.Second
|
||||||
|
consulRetryInterval = 15 * time.Second
|
||||||
|
|
||||||
|
// ConsuleNodeLabel is the name for the label containing a target's node name.
|
||||||
|
ConsulNodeLabel = clientmodel.MetaLabelPrefix + "consul_node"
|
||||||
|
// ConsulTagsLabel is the name of the label containing the tags assigned to the target.
|
||||||
|
ConsulTagsLabel = clientmodel.MetaLabelPrefix + "consul_tags"
|
||||||
|
// ConsulServiceLabel is the name of the label containing the service name.
|
||||||
|
ConsulServiceLabel = clientmodel.MetaLabelPrefix + "consul_service"
|
||||||
|
// ConsulDCLabel is the name of the label containing the datacenter ID.
|
||||||
|
ConsulDCLabel = clientmodel.MetaLabelPrefix + "consul_dc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConsulDiscovery retrieves target information from a Consul server
|
||||||
|
// and updates them via watches.
|
||||||
|
type ConsulDiscovery struct {
|
||||||
|
client *consul.Client
|
||||||
|
clientConf *consul.Config
|
||||||
|
tagSeparator string
|
||||||
|
scrapedServices map[string]struct{}
|
||||||
|
|
||||||
|
mu sync.RWMutex
|
||||||
|
services map[string]*consulService
|
||||||
|
runDone, srvsDone chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// consulService contains data belonging to the same service.
|
||||||
|
type consulService struct {
|
||||||
|
name string
|
||||||
|
tgroup *config.TargetGroup
|
||||||
|
lastIndex uint64
|
||||||
|
removed bool
|
||||||
|
running bool
|
||||||
|
done chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConsulDiscovery returns a new ConsulDiscovery for the given config.
|
||||||
|
func NewConsulDiscovery(conf *config.ConsulSDConfig) *ConsulDiscovery {
|
||||||
|
clientConf := &consul.Config{
|
||||||
|
Address: conf.Server,
|
||||||
|
Scheme: conf.Scheme,
|
||||||
|
Datacenter: conf.Datacenter,
|
||||||
|
Token: conf.Token,
|
||||||
|
HttpAuth: &consul.HttpBasicAuth{
|
||||||
|
Username: conf.Username,
|
||||||
|
Password: conf.Password,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
client, err := consul.NewClient(clientConf)
|
||||||
|
if err != nil {
|
||||||
|
// NewClient always returns a nil error.
|
||||||
|
panic(fmt.Errorf("discovery.NewConsulDiscovery: %s", err))
|
||||||
|
}
|
||||||
|
cd := &ConsulDiscovery{
|
||||||
|
client: client,
|
||||||
|
clientConf: clientConf,
|
||||||
|
tagSeparator: conf.TagSeparator,
|
||||||
|
runDone: make(chan struct{}),
|
||||||
|
srvsDone: make(chan struct{}, 1),
|
||||||
|
scrapedServices: map[string]struct{}{},
|
||||||
|
services: map[string]*consulService{},
|
||||||
|
}
|
||||||
|
for _, name := range conf.Services {
|
||||||
|
cd.scrapedServices[name] = struct{}{}
|
||||||
|
}
|
||||||
|
return cd
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sources implements the TargetProvider interface.
|
||||||
|
func (cd *ConsulDiscovery) Sources() []string {
|
||||||
|
clientConf := *cd.clientConf
|
||||||
|
clientConf.HttpClient = &http.Client{Timeout: 5 * time.Second}
|
||||||
|
|
||||||
|
client, err := consul.NewClient(&clientConf)
|
||||||
|
if err != nil {
|
||||||
|
// NewClient always returns a nil error.
|
||||||
|
panic(fmt.Errorf("discovery.ConsulDiscovery.Sources: %s", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
srvs, _, err := client.Catalog().Services(nil)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error refreshing service list: %s", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cd.mu.Lock()
|
||||||
|
defer cd.mu.Unlock()
|
||||||
|
|
||||||
|
srcs := make([]string, 0, len(srvs))
|
||||||
|
for name := range srvs {
|
||||||
|
if _, ok := cd.scrapedServices[name]; ok {
|
||||||
|
srcs = append(srcs, consulSourcePrefix+":"+name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return srcs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run implements the TargetProvider interface.
|
||||||
|
func (cd *ConsulDiscovery) Run(ch chan<- *config.TargetGroup) {
|
||||||
|
defer close(ch)
|
||||||
|
|
||||||
|
update := make(chan *consulService, 10)
|
||||||
|
go cd.watchServices(update)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-cd.runDone:
|
||||||
|
return
|
||||||
|
case srv := <-update:
|
||||||
|
if srv.removed {
|
||||||
|
ch <- &config.TargetGroup{Source: consulSourcePrefix + ":" + srv.name}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Launch watcher for the service.
|
||||||
|
if !srv.running {
|
||||||
|
go cd.watchService(srv, ch)
|
||||||
|
srv.running = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop implements the TargetProvider interface.
|
||||||
|
func (cd *ConsulDiscovery) Stop() {
|
||||||
|
glog.V(1).Infof("Stopping Consul service discovery for %s", cd.clientConf.Address)
|
||||||
|
|
||||||
|
// The lock prevents Run from terminating while the watchers attempt
|
||||||
|
// to send on their channels.
|
||||||
|
cd.mu.Lock()
|
||||||
|
defer cd.mu.Unlock()
|
||||||
|
|
||||||
|
// The watching goroutines will terminate after their next watch timeout.
|
||||||
|
// As this can take long, the channel is buffered and we do not wait.
|
||||||
|
for _, srv := range cd.services {
|
||||||
|
srv.done <- struct{}{}
|
||||||
|
}
|
||||||
|
cd.srvsDone <- struct{}{}
|
||||||
|
|
||||||
|
// Terminate Run.
|
||||||
|
cd.runDone <- struct{}{}
|
||||||
|
|
||||||
|
glog.V(1).Infof("Consul service discovery for %s stopped.", cd.clientConf.Address)
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchServices retrieves updates from Consul's services endpoint and sends
|
||||||
|
// potential updates to the update channel.
|
||||||
|
func (cd *ConsulDiscovery) watchServices(update chan<- *consulService) {
|
||||||
|
var lastIndex uint64
|
||||||
|
for {
|
||||||
|
catalog := cd.client.Catalog()
|
||||||
|
srvs, meta, err := catalog.Services(&consul.QueryOptions{
|
||||||
|
RequireConsistent: false,
|
||||||
|
WaitIndex: lastIndex,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error refreshing service list: %s", err)
|
||||||
|
<-time.After(consulRetryInterval)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// If the index equals the previous one, the watch timed out with no update.
|
||||||
|
if meta.LastIndex == lastIndex {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
lastIndex = meta.LastIndex
|
||||||
|
|
||||||
|
cd.mu.Lock()
|
||||||
|
select {
|
||||||
|
case <-cd.srvsDone:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// Continue.
|
||||||
|
}
|
||||||
|
// Check for new services.
|
||||||
|
for name := range srvs {
|
||||||
|
if _, ok := cd.scrapedServices[name]; !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
srv, ok := cd.services[name]
|
||||||
|
if !ok {
|
||||||
|
srv = &consulService{
|
||||||
|
name: name,
|
||||||
|
tgroup: &config.TargetGroup{},
|
||||||
|
done: make(chan struct{}, 1),
|
||||||
|
}
|
||||||
|
srv.tgroup.Source = consulSourcePrefix + ":" + name
|
||||||
|
cd.services[name] = srv
|
||||||
|
}
|
||||||
|
srv.tgroup.Labels = clientmodel.LabelSet{
|
||||||
|
ConsulServiceLabel: clientmodel.LabelValue(name),
|
||||||
|
ConsulDCLabel: clientmodel.LabelValue(cd.clientConf.Datacenter),
|
||||||
|
}
|
||||||
|
update <- srv
|
||||||
|
}
|
||||||
|
// Check for removed services.
|
||||||
|
for name, srv := range cd.services {
|
||||||
|
if _, ok := srvs[name]; !ok {
|
||||||
|
srv.removed = true
|
||||||
|
update <- srv
|
||||||
|
srv.done <- struct{}{}
|
||||||
|
delete(cd.services, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cd.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchService retrieves updates about srv from Consul's service endpoint.
|
||||||
|
// On a potential update the resulting target group is sent to ch.
|
||||||
|
func (cd *ConsulDiscovery) watchService(srv *consulService, ch chan<- *config.TargetGroup) {
|
||||||
|
catalog := cd.client.Catalog()
|
||||||
|
for {
|
||||||
|
nodes, meta, err := catalog.Service(srv.name, "", &consul.QueryOptions{
|
||||||
|
WaitIndex: srv.lastIndex,
|
||||||
|
WaitTime: consulWatchTimeout,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error refreshing service %s: %s", srv.name, err)
|
||||||
|
<-time.After(consulRetryInterval)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// If the index equals the previous one, the watch timed out with no update.
|
||||||
|
if meta.LastIndex == srv.lastIndex {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
srv.lastIndex = meta.LastIndex
|
||||||
|
srv.tgroup.Targets = make([]clientmodel.LabelSet, 0, len(nodes))
|
||||||
|
|
||||||
|
for _, node := range nodes {
|
||||||
|
addr := fmt.Sprintf("%s:%d", node.Address, node.ServicePort)
|
||||||
|
tags := strings.Join(node.ServiceTags, cd.tagSeparator)
|
||||||
|
|
||||||
|
srv.tgroup.Targets = append(srv.tgroup.Targets, clientmodel.LabelSet{
|
||||||
|
clientmodel.AddressLabel: clientmodel.LabelValue(addr),
|
||||||
|
ConsulNodeLabel: clientmodel.LabelValue(node.Node),
|
||||||
|
ConsulTagsLabel: clientmodel.LabelValue(tags),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
cd.mu.Lock()
|
||||||
|
select {
|
||||||
|
case <-srv.done:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// Continue.
|
||||||
|
}
|
||||||
|
ch <- srv.tgroup
|
||||||
|
cd.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
// Copyright 2015 The Prometheus Authors
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
// You may obtain a copy of the License at
|
// You may obtain a copy of the License at
|
||||||
|
@ -11,13 +11,13 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
package retrieval
|
package discovery
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
@ -25,12 +25,19 @@ import (
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
|
||||||
clientmodel "github.com/prometheus/client_golang/model"
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/config"
|
"github.com/prometheus/prometheus/config"
|
||||||
"github.com/prometheus/prometheus/utility"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const resolvConf = "/etc/resolv.conf"
|
const (
|
||||||
|
resolvConf = "/etc/resolv.conf"
|
||||||
|
|
||||||
|
dnsSourcePrefix = "dns"
|
||||||
|
DNSNameLabel = clientmodel.MetaLabelPrefix + "dns_srv_name"
|
||||||
|
|
||||||
|
// Constants for instrumentation.
|
||||||
|
namespace = "prometheus"
|
||||||
|
interval = "interval"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
dnsSDLookupsCount = prometheus.NewCounter(
|
dnsSDLookupsCount = prometheus.NewCounter(
|
||||||
|
@ -52,65 +59,84 @@ func init() {
|
||||||
prometheus.MustRegister(dnsSDLookupsCount)
|
prometheus.MustRegister(dnsSDLookupsCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TargetProvider encapsulates retrieving all targets for a job.
|
// DNSDiscovery periodically performs DNS-SD requests. It implements
|
||||||
type TargetProvider interface {
|
// the TargetProvider interface.
|
||||||
// Retrieves the current list of targets for this provider.
|
type DNSDiscovery struct {
|
||||||
Targets() ([]Target, error)
|
names []string
|
||||||
|
|
||||||
|
done chan struct{}
|
||||||
|
ticker *time.Ticker
|
||||||
|
m sync.RWMutex
|
||||||
}
|
}
|
||||||
|
|
||||||
type sdTargetProvider struct {
|
// NewDNSDiscovery returns a new DNSDiscovery which periodically refreshes its targets.
|
||||||
job config.JobConfig
|
func NewDNSDiscovery(conf *config.DNSSDConfig) *DNSDiscovery {
|
||||||
globalLabels clientmodel.LabelSet
|
return &DNSDiscovery{
|
||||||
targets []Target
|
names: conf.Names,
|
||||||
|
done: make(chan struct{}),
|
||||||
lastRefresh time.Time
|
ticker: time.NewTicker(time.Duration(conf.RefreshInterval)),
|
||||||
refreshInterval time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSdTargetProvider constructs a new sdTargetProvider for a job.
|
|
||||||
func NewSdTargetProvider(job config.JobConfig, globalLabels clientmodel.LabelSet) *sdTargetProvider {
|
|
||||||
i, err := utility.StringToDuration(job.GetSdRefreshInterval())
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("illegal refresh duration string %s: %s", job.GetSdRefreshInterval(), err))
|
|
||||||
}
|
|
||||||
return &sdTargetProvider{
|
|
||||||
job: job,
|
|
||||||
globalLabels: globalLabels,
|
|
||||||
refreshInterval: i,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *sdTargetProvider) Targets() ([]Target, error) {
|
// Run implements the TargetProvider interface.
|
||||||
var err error
|
func (dd *DNSDiscovery) Run(ch chan<- *config.TargetGroup) {
|
||||||
defer func() {
|
defer close(ch)
|
||||||
|
|
||||||
|
// Get an initial set right away.
|
||||||
|
dd.refreshAll(ch)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-dd.ticker.C:
|
||||||
|
dd.refreshAll(ch)
|
||||||
|
case <-dd.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop implements the TargetProvider interface.
|
||||||
|
func (dd *DNSDiscovery) Stop() {
|
||||||
|
glog.V(1).Info("Stopping DNS discovery for %s...", dd.names)
|
||||||
|
|
||||||
|
dd.ticker.Stop()
|
||||||
|
dd.done <- struct{}{}
|
||||||
|
|
||||||
|
glog.V(1).Info("DNS discovery for %s stopped.", dd.names)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sources implements the TargetProvider interface.
|
||||||
|
func (dd *DNSDiscovery) Sources() []string {
|
||||||
|
var srcs []string
|
||||||
|
for _, name := range dd.names {
|
||||||
|
srcs = append(srcs, dnsSourcePrefix+":"+name)
|
||||||
|
}
|
||||||
|
return srcs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dd *DNSDiscovery) refreshAll(ch chan<- *config.TargetGroup) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(dd.names))
|
||||||
|
for _, name := range dd.names {
|
||||||
|
go func(n string) {
|
||||||
|
if err := dd.refresh(n, ch); err != nil {
|
||||||
|
glog.Errorf("Error refreshing DNS targets: %s", err)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}(name)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dd *DNSDiscovery) refresh(name string, ch chan<- *config.TargetGroup) error {
|
||||||
|
response, err := lookupSRV(name)
|
||||||
dnsSDLookupsCount.Inc()
|
dnsSDLookupsCount.Inc()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dnsSDLookupFailuresCount.Inc()
|
dnsSDLookupFailuresCount.Inc()
|
||||||
}
|
return err
|
||||||
}()
|
|
||||||
|
|
||||||
if time.Since(p.lastRefresh) < p.refreshInterval {
|
|
||||||
return p.targets, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
response, err := lookupSRV(p.job.GetSdName())
|
tg := &config.TargetGroup{}
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
baseLabels := clientmodel.LabelSet{
|
|
||||||
clientmodel.JobLabel: clientmodel.LabelValue(p.job.GetName()),
|
|
||||||
}
|
|
||||||
for n, v := range p.globalLabels {
|
|
||||||
baseLabels[n] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
targets := make([]Target, 0, len(response.Answer))
|
|
||||||
endpoint := &url.URL{
|
|
||||||
Scheme: "http",
|
|
||||||
Path: p.job.GetMetricsPath(),
|
|
||||||
}
|
|
||||||
for _, record := range response.Answer {
|
for _, record := range response.Answer {
|
||||||
addr, ok := record.(*dns.SRV)
|
addr, ok := record.(*dns.SRV)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -118,22 +144,25 @@ func (p *sdTargetProvider) Targets() ([]Target, error) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Remove the final dot from rooted DNS names to make them look more usual.
|
// Remove the final dot from rooted DNS names to make them look more usual.
|
||||||
if addr.Target[len(addr.Target)-1] == '.' {
|
addr.Target = strings.TrimRight(addr.Target, ".")
|
||||||
addr.Target = addr.Target[:len(addr.Target)-1]
|
|
||||||
}
|
target := clientmodel.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port))
|
||||||
endpoint.Host = fmt.Sprintf("%s:%d", addr.Target, addr.Port)
|
tg.Targets = append(tg.Targets, clientmodel.LabelSet{
|
||||||
t := NewTarget(endpoint.String(), p.job.ScrapeTimeout(), baseLabels)
|
clientmodel.AddressLabel: target,
|
||||||
targets = append(targets, t)
|
DNSNameLabel: clientmodel.LabelValue(name),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
p.targets = targets
|
tg.Source = dnsSourcePrefix + ":" + name
|
||||||
return targets, nil
|
ch <- tg
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func lookupSRV(name string) (*dns.Msg, error) {
|
func lookupSRV(name string) (*dns.Msg, error) {
|
||||||
conf, err := dns.ClientConfigFromFile(resolvConf)
|
conf, err := dns.ClientConfigFromFile(resolvConf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't load resolv.conf: %s", err)
|
return nil, fmt.Errorf("could not load resolv.conf: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
client := &dns.Client{}
|
client := &dns.Client{}
|
||||||
|
@ -143,20 +172,20 @@ func lookupSRV(name string) (*dns.Msg, error) {
|
||||||
servAddr := net.JoinHostPort(server, conf.Port)
|
servAddr := net.JoinHostPort(server, conf.Port)
|
||||||
for _, suffix := range conf.Search {
|
for _, suffix := range conf.Search {
|
||||||
response, err = lookup(name, dns.TypeSRV, client, servAddr, suffix, false)
|
response, err = lookup(name, dns.TypeSRV, client, servAddr, suffix, false)
|
||||||
if err == nil {
|
if err != nil {
|
||||||
|
glog.Warningf("resolving %s.%s failed: %s", name, suffix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
if len(response.Answer) > 0 {
|
if len(response.Answer) > 0 {
|
||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
glog.Warningf("resolving %s.%s failed: %s", name, suffix, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
response, err = lookup(name, dns.TypeSRV, client, servAddr, "", false)
|
response, err = lookup(name, dns.TypeSRV, client, servAddr, "", false)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return response, fmt.Errorf("couldn't resolve %s: No server responded", name)
|
return response, fmt.Errorf("could not resolve %s: No server responded", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func lookup(name string, queryType uint16, client *dns.Client, servAddr string, suffix string, edns bool) (*dns.Msg, error) {
|
func lookup(name string, queryType uint16, client *dns.Client, servAddr string, suffix string, edns bool) (*dns.Msg, error) {
|
||||||
|
@ -179,7 +208,6 @@ func lookup(name string, queryType uint16, client *dns.Client, servAddr string,
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if msg.Id != response.Id {
|
if msg.Id != response.Id {
|
||||||
return nil, fmt.Errorf("DNS ID mismatch, request: %d, response: %d", msg.Id, response.Id)
|
return nil, fmt.Errorf("DNS ID mismatch, request: %d, response: %d", msg.Id, response.Id)
|
||||||
}
|
}
|
||||||
|
@ -188,11 +216,9 @@ func lookup(name string, queryType uint16, client *dns.Client, servAddr string,
|
||||||
if client.Net == "tcp" {
|
if client.Net == "tcp" {
|
||||||
return nil, fmt.Errorf("got truncated message on tcp")
|
return nil, fmt.Errorf("got truncated message on tcp")
|
||||||
}
|
}
|
||||||
|
|
||||||
if edns { // Truncated even though EDNS is used
|
if edns { // Truncated even though EDNS is used
|
||||||
client.Net = "tcp"
|
client.Net = "tcp"
|
||||||
}
|
}
|
||||||
|
|
||||||
return lookup(name, queryType, client, servAddr, suffix, !edns)
|
return lookup(name, queryType, client, servAddr, suffix, !edns)
|
||||||
}
|
}
|
||||||
|
|
255
retrieval/discovery/file.go
Normal file
255
retrieval/discovery/file.go
Normal file
|
@ -0,0 +1,255 @@
|
||||||
|
// Copyright 2015 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"gopkg.in/fsnotify.v1"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
|
|
||||||
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
const FileSDFilepathLabel = clientmodel.MetaLabelPrefix + "filepath"
|
||||||
|
|
||||||
|
// FileDiscovery provides service discovery functionality based
|
||||||
|
// on files that contain target groups in JSON or YAML format. Refreshing
|
||||||
|
// happens using file watches and periodic refreshes.
|
||||||
|
type FileDiscovery struct {
|
||||||
|
paths []string
|
||||||
|
watcher *fsnotify.Watcher
|
||||||
|
interval time.Duration
|
||||||
|
done chan struct{}
|
||||||
|
|
||||||
|
// lastRefresh stores which files were found during the last refresh
|
||||||
|
// and how many target groups they contained.
|
||||||
|
// This is used to detect deleted target groups.
|
||||||
|
lastRefresh map[string]int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileDiscovery returns a new file discovery for the given paths.
|
||||||
|
func NewFileDiscovery(conf *config.FileSDConfig) *FileDiscovery {
|
||||||
|
return &FileDiscovery{
|
||||||
|
paths: conf.Names,
|
||||||
|
interval: time.Duration(conf.RefreshInterval),
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sources implements the TargetProvider interface.
|
||||||
|
func (fd *FileDiscovery) Sources() []string {
|
||||||
|
var srcs []string
|
||||||
|
// As we allow multiple target groups per file we have no choice
|
||||||
|
// but to parse them all.
|
||||||
|
for _, p := range fd.listFiles() {
|
||||||
|
tgroups, err := readFile(p)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error reading file %q: ", p, err)
|
||||||
|
}
|
||||||
|
for _, tg := range tgroups {
|
||||||
|
srcs = append(srcs, tg.Source)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return srcs
|
||||||
|
}
|
||||||
|
|
||||||
|
// listFiles returns a list of all files that match the configured patterns.
|
||||||
|
func (fd *FileDiscovery) listFiles() []string {
|
||||||
|
var paths []string
|
||||||
|
for _, p := range fd.paths {
|
||||||
|
files, err := filepath.Glob(p)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error expanding glob %q: %s", p, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
paths = append(paths, files...)
|
||||||
|
}
|
||||||
|
return paths
|
||||||
|
}
|
||||||
|
|
||||||
|
// watchFiles sets watches on all full paths or directories that were configured for
|
||||||
|
// this file discovery.
|
||||||
|
func (fd *FileDiscovery) watchFiles() {
|
||||||
|
if fd.watcher == nil {
|
||||||
|
panic("no watcher configured")
|
||||||
|
}
|
||||||
|
for _, p := range fd.paths {
|
||||||
|
if idx := strings.LastIndex(p, "/"); idx > -1 {
|
||||||
|
p = p[:idx]
|
||||||
|
} else {
|
||||||
|
p = "./"
|
||||||
|
}
|
||||||
|
if err := fd.watcher.Add(p); err != nil {
|
||||||
|
glog.Errorf("Error adding file watch for %q: %s", p, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run implements the TargetProvider interface.
|
||||||
|
func (fd *FileDiscovery) Run(ch chan<- *config.TargetGroup) {
|
||||||
|
defer close(ch)
|
||||||
|
|
||||||
|
watcher, err := fsnotify.NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error creating file watcher: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fd.watcher = watcher
|
||||||
|
|
||||||
|
fd.refresh(ch)
|
||||||
|
|
||||||
|
ticker := time.NewTicker(fd.interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Stopping has priority over refreshing. Thus we wrap the actual select
|
||||||
|
// clause to always catch done signals.
|
||||||
|
select {
|
||||||
|
case <-fd.done:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
select {
|
||||||
|
case event := <-fd.watcher.Events:
|
||||||
|
// fsnotify sometimes sends a bunch of events without name or operation.
|
||||||
|
// It's unclear what they are and why they are sent - filter them out.
|
||||||
|
if len(event.Name) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Everything but a chmod requires rereading.
|
||||||
|
if event.Op^fsnotify.Chmod == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Changes to a file can spawn various sequences of events with
|
||||||
|
// different combinations of operations. For all practical purposes
|
||||||
|
// this is inaccurate.
|
||||||
|
// The most reliable solution is to reload everything if anything happens.
|
||||||
|
fd.refresh(ch)
|
||||||
|
|
||||||
|
case <-ticker.C:
|
||||||
|
// Setting a new watch after an update might fail. Make sure we don't lose
|
||||||
|
// those files forever.
|
||||||
|
fd.refresh(ch)
|
||||||
|
|
||||||
|
case err := <-fd.watcher.Errors:
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error on file watch: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-fd.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// refresh reads all files matching the discoveries patterns and sends the respective
|
||||||
|
// updated target groups through the channel.
|
||||||
|
func (fd *FileDiscovery) refresh(ch chan<- *config.TargetGroup) {
|
||||||
|
ref := map[string]int{}
|
||||||
|
for _, p := range fd.listFiles() {
|
||||||
|
tgroups, err := readFile(p)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("Error reading file %q: %s", p, err)
|
||||||
|
// Prevent deletion down below.
|
||||||
|
ref[p] = fd.lastRefresh[p]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, tg := range tgroups {
|
||||||
|
ch <- tg
|
||||||
|
}
|
||||||
|
ref[p] = len(tgroups)
|
||||||
|
}
|
||||||
|
// Send empty updates for sources that disappeared.
|
||||||
|
for f, n := range fd.lastRefresh {
|
||||||
|
m, ok := ref[f]
|
||||||
|
if !ok || n > m {
|
||||||
|
for i := m; i < n; i++ {
|
||||||
|
ch <- &config.TargetGroup{Source: fileSource(f, i)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fd.lastRefresh = ref
|
||||||
|
|
||||||
|
fd.watchFiles()
|
||||||
|
}
|
||||||
|
|
||||||
|
// fileSource returns a source ID for the i-th target group in the file.
|
||||||
|
func fileSource(filename string, i int) string {
|
||||||
|
return fmt.Sprintf("file:%s:%d", filename, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop implements the TargetProvider interface.
|
||||||
|
func (fd *FileDiscovery) Stop() {
|
||||||
|
glog.V(1).Infof("Stopping file discovery for %s...", fd.paths)
|
||||||
|
|
||||||
|
fd.done <- struct{}{}
|
||||||
|
// Closing the watcher will deadlock unless all events and errors are drained.
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-fd.watcher.Errors:
|
||||||
|
case <-fd.watcher.Events:
|
||||||
|
// Drain all events and errors.
|
||||||
|
case <-fd.done:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
fd.watcher.Close()
|
||||||
|
|
||||||
|
fd.done <- struct{}{}
|
||||||
|
|
||||||
|
glog.V(1).Infof("File discovery for %s stopped.", fd.paths)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFile reads a JSON or YAML list of targets groups from the file, depending on its
|
||||||
|
// file extension. It returns full configuration target groups.
|
||||||
|
func readFile(filename string) ([]*config.TargetGroup, error) {
|
||||||
|
content, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var targetGroups []*config.TargetGroup
|
||||||
|
|
||||||
|
switch ext := filepath.Ext(filename); strings.ToLower(ext) {
|
||||||
|
case ".json":
|
||||||
|
if err := json.Unmarshal(content, &targetGroups); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case ".yml", ".yaml":
|
||||||
|
if err := yaml.Unmarshal(content, &targetGroups); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("retrieval.FileDiscovery.readFile: unhandled file extension %q", ext))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tg := range targetGroups {
|
||||||
|
tg.Source = fileSource(filename, i)
|
||||||
|
tg.Labels = clientmodel.LabelSet{
|
||||||
|
FileSDFilepathLabel: clientmodel.LabelValue(filename),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return targetGroups, nil
|
||||||
|
}
|
97
retrieval/discovery/file_test.go
Normal file
97
retrieval/discovery/file_test.go
Normal file
|
@ -0,0 +1,97 @@
|
||||||
|
package discovery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFileSD(t *testing.T) {
|
||||||
|
testFileSD(t, ".yml")
|
||||||
|
testFileSD(t, ".json")
|
||||||
|
os.Remove("fixtures/_test.yml")
|
||||||
|
os.Remove("fixtures/_test.json")
|
||||||
|
}
|
||||||
|
|
||||||
|
func testFileSD(t *testing.T, ext string) {
|
||||||
|
// As interval refreshing is more of a fallback, we only want to test
|
||||||
|
// whether file watches work as expected.
|
||||||
|
var conf config.FileSDConfig
|
||||||
|
conf.Names = []string{"fixtures/_*" + ext}
|
||||||
|
conf.RefreshInterval = config.Duration(1 * time.Hour)
|
||||||
|
|
||||||
|
fsd := NewFileDiscovery(&conf)
|
||||||
|
|
||||||
|
ch := make(chan *config.TargetGroup)
|
||||||
|
go fsd.Run(ch)
|
||||||
|
defer fsd.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(25 * time.Millisecond):
|
||||||
|
// Expected.
|
||||||
|
case tg := <-ch:
|
||||||
|
t.Fatalf("Unexpected target group in file discovery: %s", tg)
|
||||||
|
}
|
||||||
|
|
||||||
|
newf, err := os.Create("fixtures/_test" + ext)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer newf.Close()
|
||||||
|
|
||||||
|
f, err := os.Open("fixtures/target_groups" + ext)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
_, err = io.Copy(newf, f)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
newf.Close()
|
||||||
|
|
||||||
|
// The files contain two target groups which are read and sent in order.
|
||||||
|
select {
|
||||||
|
case <-time.After(15 * time.Second):
|
||||||
|
t.Fatalf("Expected new target group but got none")
|
||||||
|
case tg := <-ch:
|
||||||
|
if tg.String() != fmt.Sprintf("file:fixtures/_test%s:0", ext) {
|
||||||
|
t.Fatalf("Unexpected target group", tg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-time.After(15 * time.Second):
|
||||||
|
t.Fatalf("Expected new target group but got none")
|
||||||
|
case tg := <-ch:
|
||||||
|
if tg.String() != fmt.Sprintf("file:fixtures/_test%s:1", ext) {
|
||||||
|
t.Fatalf("Unexpected target group %s", tg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Based on unknown circumstances, sometimes fsnotify will trigger more events in
|
||||||
|
// some runs (which might be empty, chains of different operations etc.).
|
||||||
|
// We have to drain those (as the target manager would) to avoid deadlocking and must
|
||||||
|
// not try to make sense of it all...
|
||||||
|
go func() {
|
||||||
|
for tg := range ch {
|
||||||
|
// Below we will change the file to a bad syntax. Previously extracted target
|
||||||
|
// groups must not be deleted via sending an empty target group.
|
||||||
|
if len(tg.Targets) == 0 {
|
||||||
|
t.Fatalf("Unexpected empty target group received: %s", tg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
newf, err = os.Create("fixtures/_test" + ext)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err := newf.Write([]byte("]gibberish\n][")); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
newf.Close()
|
||||||
|
}
|
11
retrieval/discovery/fixtures/target_groups.json
Normal file
11
retrieval/discovery/fixtures/target_groups.json
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"targets": ["localhost:9090", "example.org:443"],
|
||||||
|
"labels": {
|
||||||
|
"foo": "bar"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"targets": ["my.domain"]
|
||||||
|
}
|
||||||
|
]
|
5
retrieval/discovery/fixtures/target_groups.yml
Normal file
5
retrieval/discovery/fixtures/target_groups.yml
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
- targets: ['localhost:9090', 'example.org:443']
|
||||||
|
labels:
|
||||||
|
test: success
|
||||||
|
|
||||||
|
- targets: ['my.domain']
|
|
@ -17,6 +17,8 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
clientmodel "github.com/prometheus/client_golang/model"
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
type nopAppender struct{}
|
type nopAppender struct{}
|
||||||
|
@ -38,3 +40,25 @@ type collectResultAppender struct {
|
||||||
func (a *collectResultAppender) Append(s *clientmodel.Sample) {
|
func (a *collectResultAppender) Append(s *clientmodel.Sample) {
|
||||||
a.result = append(a.result, s)
|
a.result = append(a.result, s)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fakeTargetProvider implements a TargetProvider and allows manual injection
|
||||||
|
// of TargetGroups through the update channel.
|
||||||
|
type fakeTargetProvider struct {
|
||||||
|
sources []string
|
||||||
|
update chan *config.TargetGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tp *fakeTargetProvider) Run(ch chan<- *config.TargetGroup) {
|
||||||
|
defer close(ch)
|
||||||
|
for tg := range tp.update {
|
||||||
|
ch <- tg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tp *fakeTargetProvider) Stop() {
|
||||||
|
close(tp.update)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tp *fakeTargetProvider) Sources() []string {
|
||||||
|
return tp.sources
|
||||||
|
}
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
// Copyright 2013 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package retrieval
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestInterfaces(t *testing.T) {
|
|
||||||
var (
|
|
||||||
_ Target = &target{}
|
|
||||||
_ TargetManager = &targetManager{}
|
|
||||||
)
|
|
||||||
}
|
|
63
retrieval/relabel.go
Normal file
63
retrieval/relabel.go
Normal file
|
@ -0,0 +1,63 @@
|
||||||
|
package retrieval
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Relabel returns a relabeled copy of the given label set. The relabel configurations
|
||||||
|
// are applied in order of input.
|
||||||
|
// If a label set is dropped, nil is returned.
|
||||||
|
func Relabel(labels clientmodel.LabelSet, cfgs ...*config.RelabelConfig) (clientmodel.LabelSet, error) {
|
||||||
|
out := clientmodel.LabelSet{}
|
||||||
|
for ln, lv := range labels {
|
||||||
|
out[ln] = lv
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
for _, cfg := range cfgs {
|
||||||
|
if out, err = relabel(out, cfg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if out == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func relabel(labels clientmodel.LabelSet, cfg *config.RelabelConfig) (clientmodel.LabelSet, error) {
|
||||||
|
values := make([]string, 0, len(cfg.SourceLabels))
|
||||||
|
for _, ln := range cfg.SourceLabels {
|
||||||
|
values = append(values, string(labels[ln]))
|
||||||
|
}
|
||||||
|
val := strings.Join(values, cfg.Separator)
|
||||||
|
|
||||||
|
switch cfg.Action {
|
||||||
|
case config.RelabelDrop:
|
||||||
|
if cfg.Regex.MatchString(val) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
case config.RelabelKeep:
|
||||||
|
if !cfg.Regex.MatchString(val) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
case config.RelabelReplace:
|
||||||
|
// If there is no match no replacement must take place.
|
||||||
|
if !cfg.Regex.MatchString(val) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
res := cfg.Regex.ReplaceAllString(val, cfg.Replacement)
|
||||||
|
if res == "" {
|
||||||
|
delete(labels, cfg.TargetLabel)
|
||||||
|
} else {
|
||||||
|
labels[cfg.TargetLabel] = clientmodel.LabelValue(res)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("retrieval.relabel: unknown relabel action type %q", cfg.Action))
|
||||||
|
}
|
||||||
|
return labels, nil
|
||||||
|
}
|
170
retrieval/relabel_test.go
Normal file
170
retrieval/relabel_test.go
Normal file
|
@ -0,0 +1,170 @@
|
||||||
|
package retrieval
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
clientmodel "github.com/prometheus/client_golang/model"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRelabel(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input clientmodel.LabelSet
|
||||||
|
relabel []config.DefaultedRelabelConfig
|
||||||
|
output clientmodel.LabelSet
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
"b": "bar",
|
||||||
|
"c": "baz",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("f(.*)")},
|
||||||
|
TargetLabel: clientmodel.LabelName("d"),
|
||||||
|
Separator: ";",
|
||||||
|
Replacement: "ch${1}-ch${1}",
|
||||||
|
Action: config.RelabelReplace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
"b": "bar",
|
||||||
|
"c": "baz",
|
||||||
|
"d": "choo-choo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
"b": "bar",
|
||||||
|
"c": "baz",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a", "b"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("^f(.*);(.*)r$")},
|
||||||
|
TargetLabel: clientmodel.LabelName("a"),
|
||||||
|
Separator: ";",
|
||||||
|
Replacement: "b${1}${2}m", // boobam
|
||||||
|
Action: config.RelabelReplace,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"c", "a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("(b).*b(.*)ba(.*)")},
|
||||||
|
TargetLabel: clientmodel.LabelName("d"),
|
||||||
|
Separator: ";",
|
||||||
|
Replacement: "$1$2$2$3",
|
||||||
|
Action: config.RelabelReplace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: clientmodel.LabelSet{
|
||||||
|
"a": "boobam",
|
||||||
|
"b": "bar",
|
||||||
|
"c": "baz",
|
||||||
|
"d": "boooom",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("o$")},
|
||||||
|
Action: config.RelabelDrop,
|
||||||
|
}, {
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("f(.*)")},
|
||||||
|
TargetLabel: clientmodel.LabelName("d"),
|
||||||
|
Separator: ";",
|
||||||
|
Replacement: "ch$1-ch$1",
|
||||||
|
Action: config.RelabelReplace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("no-match")},
|
||||||
|
Action: config.RelabelDrop,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("no-match")},
|
||||||
|
Action: config.RelabelKeep,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("^f")},
|
||||||
|
Action: config.RelabelKeep,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: clientmodel.LabelSet{
|
||||||
|
"a": "foo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// No replacement must be applied if there is no match.
|
||||||
|
input: clientmodel.LabelSet{
|
||||||
|
"a": "boo",
|
||||||
|
},
|
||||||
|
relabel: []config.DefaultedRelabelConfig{
|
||||||
|
{
|
||||||
|
SourceLabels: clientmodel.LabelNames{"a"},
|
||||||
|
Regex: &config.Regexp{*regexp.MustCompile("^f")},
|
||||||
|
TargetLabel: clientmodel.LabelName("b"),
|
||||||
|
Replacement: "bar",
|
||||||
|
Action: config.RelabelReplace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
output: clientmodel.LabelSet{
|
||||||
|
"a": "boo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, test := range tests {
|
||||||
|
var relabel []*config.RelabelConfig
|
||||||
|
for _, rl := range test.relabel {
|
||||||
|
relabel = append(relabel, &config.RelabelConfig{rl})
|
||||||
|
}
|
||||||
|
res, err := Relabel(test.input, relabel...)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Test %d: error relabeling: %s", i+1, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(res, test.output) {
|
||||||
|
t.Errorf("Test %d: relabel output mismatch: expected %#v, got %#v", i+1, test.output, res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue